Dataset columns (name, type, value/length range):

repo_name             string (length 7–71)
file_path             string (length 5–118)
context               list
import_statement      string (length 45–12.5k)
token_num             int64 (641–99.4k)
cropped_code          string (length 44–17k)
all_code              string (length 43–754k)
next_line             string (length 2–330)
gold_snippet_index    int64 (0–68)
created_at            string (length 25)
level                 string (9 classes)
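For orientation, below is a minimal sketch of how records with this schema could be read and inspected, assuming they are stored locally as JSON Lines. The file name and the way the fields are combined are illustrative assumptions, not part of the dump itself.

```python
import json

# Minimal sketch: read records that follow the schema above from a local
# JSON Lines file. The path "code_completion_rows.jsonl" is a placeholder.
with open("code_completion_rows.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)

        # Scalar columns.
        print(row["repo_name"], row["file_path"], row["token_num"], row["level"])

        # "context" is a list of cross-file snippets, each carrying an
        # "identifier", a "path", and the "snippet" source text.
        for entry in row["context"]:
            print(entry["identifier"], entry["path"])

        # Long string columns: the imports, the cropped source, and the
        # reference completion line.
        prompt = row["import_statement"] + "\n" + row["cropped_code"]
        reference = row["next_line"]
        break  # inspect only the first record
```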
microsoft/Everything-of-Thoughts-XoT
xot_all_in_one/xot/controller/controller.py
[ { "identifier": "IO_Solver", "path": "xot_all_in_one/xot/controller/solver/io_solver.py", "snippet": "class IO_Solver:\n def __init__(self, args, gpt, game, prompter, parser, to_print=False):\n self.args = args\n self.gpt = gpt\n self.game = game\n self.prompter = prompter\n self.parser = parser\n self.to_print = to_print\n\n # comment: this is the main function of the io solver\n def solve(self, idx):\n \"\"\"_summary_\n Parameters:\n idx: index of the test board\n \n Return:\n ys: a list of solutions\n info: a dictionary of information\n \"\"\"\n x = self.game.getOneTestBoard(idx)\n y = ''\n if not self.args.multi_solution:\n prompt = self.prompter.standard_prompt_wrap(x, y)\n else:\n prompt = self.prompter.standard_prompt_wrap_multi(x, y)\n\n instruct = self.prompter.get_instruction_prompt()\n samples = self.gpt(prompt, instruct, n=self.args.param.n_generate_sample, stop=self.args.gpt.stop)\n\n ys= [y + _ for _ in samples]\n if self.to_print:\n print('io_solve -- input: ', x)\n print('io_solve -- prompt: ', prompt)\n print('io_solve -- output: ', samples)\n \n info = {}\n\n return ys, info" }, { "identifier": "CoT_Solver", "path": "xot_all_in_one/xot/controller/solver/cot_solver.py", "snippet": "class CoT_Solver:\n def __init__(self, args, gpt, game, prompter, parser, to_print=False):\n self.args = args\n self.gpt = gpt\n self.game = game\n self.prompter = prompter\n self.parser = parser\n self.to_print = to_print\n\n\n def solve(self, idx):\n '''_summary_\n Parameters:\n idx: index of the test board\n \n Return:\n ys: a list of solutions\n info: a dictionary of information\n '''\n x = self.game.getOneTestBoard(idx)\n y = ''\n\n if not self.args.multi_solution:\n prompt = self.prompter.cot_prompt_wrap(x, y)\n else:\n prompt = self.prompter.cot_prompt_wrap_multi(x, y)\n instruct = self.prompter.get_instruction_prompt()\n samples = self.gpt(prompt, instruct, n=self.args.param.n_generate_sample, stop=self.args.gpt.stop)\n\n ys= [y + _ for _ in samples]\n if self.to_print:\n print('cot_solve -- input: ', x)\n print('cot_solve -- prompt: ', prompt)\n print('cot_solve -- output: ', samples)\n\n info = {}\n \n return ys, info" }, { "identifier": "ToT_Solver", "path": "xot_all_in_one/xot/controller/solver/tot_solver.py", "snippet": "class ToT_Solver:\n def __init__(self, args, gpt, game, prompter, parser, to_print=False):\n \"\"\"\n Initialize the ToT_Solver with the necessary components.\n\n Args:\n args: The arguments for the solver.\n gpt: The GPT model to be used in the solver.\n game: The game or simulation to be solved.\n prompter: The prompter for generating prompts for the GPT model.\n parser: The parser for parsing the output of the GPT model.\n to_print (bool, optional): A flag indicating whether to print debug information. 
Defaults to False.\n \"\"\"\n self.args = args\n self.gpt = gpt\n self.game = game\n self.prompter = prompter\n self.parser = parser\n self.to_print = to_print\n \n def get_max_index(self, lst):\n \"\"\"\n This function returns the index of the maximum value in a list.\n\n Args:\n lst (list): The list in which the maximum value is to be found.\n \n Returns:\n max_indices: The index of the maximum value in the list.\n \"\"\"\n max_value = max(lst)\n max_indices = [index for index, value in enumerate(lst) if value == max_value]\n return random.choice(max_indices)\n\n def get_proposals(self, args, gpt, prompter, x, y, isFinished): \n \"\"\"\n This function generates proposals for a partial output 'y' using the 'get_proposals' method.\n\n Args:\n args (gpt): The arguments to be passed to the 'get_proposals' method.\n gpt: The GPT model to be used in the 'get_proposals' method.\n prompter: The prompter to be used in the 'get_proposals' method.\n x: The input to be passed to the 'get_proposals' method.\n y: The partial output for which proposals are to be generated.\n isFinished (bool): A flag indicating whether the partial output 'y' is a complete output.\n \n Returns:\n proposals: A list of generated proposals.\n current_state: The current state of the game after the partial output 'y' is applied.\n \"\"\"\n instruct = prompter.get_instruction_prompt()\n propose_prompt, current_state = prompter.propose_prompt_wrap(x, y, isFinished)\n \n proposals = gpt(propose_prompt, instruct, n=args.param.n_generate_sample, stop=args.gpt.stop)[0]\n if self.to_print:\n print('propose_prompt', propose_prompt)\n print('proposals', proposals)\n\n if isFinished:\n return [proposals + '\\n'], current_state\n else:\n if args.env == 'game24':\n proposals_precheck = proposals.strip().split('\\n')\n proposals_aftercheck = []\n for idx, p in enumerate(proposals_precheck):\n try:\n exp = p.lower().split(\"=\")[0].strip()\n terms = exp.split(' ')\n num1, _, num2 = terms[0], terms[1], terms[2]\n if num1 in current_state and num2 in current_state:\n res = eval(exp)\n proposals_aftercheck.append(p)\n except:\n continue\n else:\n proposals_aftercheck = proposals.replace('\\n', '').strip().split(', ')\n \n return [y + _ + '\\n' for _ in proposals_aftercheck], current_state\n \n\n def get_value_tot(self, args, gpt, prompter, parser, x, y, cache_value=True):\n \"\"\"\n This function calculates the total value for a partial output 'y' using the 'get_value' method.\n It uses a local cache to avoid duplicate calculations.\n\n Args:\n args (gpt): The arguments to be passed to the 'get_value' method.\n gpt: The GPT model to be used in the 'get_value' method.\n prompter: The prompter to be used in the 'get_value' method.\n parser: The parser to be used in the 'get_value' method.\n x: The input to be passed to the 'get_value' method.\n y: The partial output for which the total value is to be calculated.\n cache_value (bool, optional): A flag indicating whether to cache the calculated values. 
Defaults to True.\n \n Returns:\n value: The calculated value for the partial output 'y'.\n \"\"\"\n instruct = prompter.get_instruction_prompt()\n value_prompt = prompter.value_prompt_wrap(x, y)\n \n if cache_value and value_prompt in prompter.value_cache:\n return prompter.value_cache[value_prompt]\n value_outputs = gpt(value_prompt, instruct, n=args.param.n_generate_sample, stop=args.gpt.stop)\n value = parser.value_outputs_unwrap(x, y, value_outputs)\n \n if self.to_print:\n print('value_prompt', value_prompt)\n print('value_outputs', value_outputs)\n \n if cache_value:\n prompter.value_cache[value_prompt] = value\n return value\n\n\n def get_values_tot(self, args, gpt, prompter, parser, x, ys, cache_value=True):\n \"\"\"\n This function calculates the total value for each partial output in 'ys' using the 'get_value_tot' method.\n It uses a local cache to avoid duplicate calculations.\n\n Args:\n args (gpt): The arguments to be passed to the 'get_value_tot' method.\n gpt: The GPT model to be used in the 'get_value_tot' method.\n prompter: The prompter to be used in the 'get_value_tot' method.\n parser: The parser to be used in the 'get_value_tot' method.\n x: The input to be passed to the 'get_value_tot' method.\n ys (list): A list of partial outputs for which the total value is to be calculated.\n cache_value (bool, optional): A flag indicating whether to cache the calculated values. Defaults to True.\n\n Returns:\n values: A list of calculated values for each partial output in 'ys'.\n \"\"\"\n values = []\n local_value_cache = {}\n for y in ys: # each partial output\n if y in local_value_cache: # avoid duplicate calculations\n value = 0\n else: \n value = self.get_value_tot(args, gpt, prompter, parser, x, y, cache_value=cache_value)\n local_value_cache[y] = value\n values.append(value)\n return values\n \n\n def solve(self, idx):\n \"\"\"_summary_\n Parameters:\n idx: index of the test board\n\n Return:\n ys: a list of solutions\n info: a dictionary of information\n \"\"\"\n x = self.game.getOneTestBoard(idx)\n\n\n if self.args.multi_solution and self.args.env == 'cube':\n total_game_step = self.args.task.total_game_step + 3\n elif self.args.multi_solution and self.args.env == 'npuzzle':\n total_game_step = self.args.task.total_game_step + 3\n elif not self.args.param.last_step and self.args.env == 'game24':\n total_game_step = self.args.task.total_game_step - 1\n else: \n total_game_step = self.args.task.total_game_step\n\n\n ys = [''] # current output candidates\n infos = []\n isFinished = False\n for step in range(total_game_step+1):\n if self.to_print:\n print('Current Step: %s'%(step+1))\n # generation\n new_ys = []\n for y in ys:\n ys_, current_state = self.get_proposals(self.args, self.gpt, self.prompter, x, y, isFinished)\n new_ys.append(ys_)\n \n new_ys = list(itertools.chain(*new_ys))\n\n if self.to_print:\n print('new_ys', new_ys)\n\n if isFinished:\n infos.append({'step': step, 'x': str(x), 'thoughts': ys, 'answer': new_ys})\n ys = new_ys\n break\n \n # evaluation\n values = self.get_values_tot(self.args, self.gpt, self.prompter, self.parser, x, new_ys)\n if len(values) == 0:\n isFinished = True\n continue\n\n # selection\n if self.args.param.n_select_sample == 1: # b=1\n max_values = max(values)\n select_ids = [self.get_max_index(values)]\n select_new_ys = [new_ys[select_id] for select_id in select_ids]\n else:\n ids = list(range(len(new_ys)))\n max_values = max(values)\n select_ids = sorted(ids, key=lambda x: values[x], reverse=True)[:self.args.param.n_select_sample]\n 
select_new_ys = [new_ys[select_id] for select_id in select_ids]\n\n # log\n if self.to_print: \n sorted_new_ys, sorted_values = zip(*sorted(zip(new_ys, values), key=lambda x: x[1], reverse=True))\n print(f'-- new_ys --: {sorted_new_ys}\\n-- sol values --: {sorted_values}\\n-- choices --: {select_new_ys}\\n')\n \n infos.append({'step': step, 'x': str(x), 'ys': ys, 'new_ys': new_ys, 'values': values, 'select_new_ys': select_new_ys})\n ys = select_new_ys\n\n if self.args.env == 'game24':\n isFinished = step == total_game_step - 1 # or float(max_values) == 0.001\n else:\n moves = self.parser.extract_top_select(select_new_ys)\n # Condition to Stop: 1. One of the candiates Reach the Correct Answer; 2. Reach the maximum step; 3. only left impossible answer\n success = False\n for m in moves:\n success = success or self.game.isFinishing(x, m) \n isFinished = success or step == total_game_step - 1 # or float(max_values) == 0.001 \n\n if self.to_print: \n print(ys)\n\n info = {'steps': infos}\n\n return ys, info" }, { "identifier": "GoT_Solver", "path": "xot_all_in_one/xot/controller/solver/got_solver.py", "snippet": "class GoT_Solver:\n \"\"\"\n A class used to solve a game using a GPT model.\n\n ...\n\n Attributes\n ----------\n args : object\n a configuration object with various parameters\n gpt : object\n a GPT model used for generating proposals and selections\n game : object\n a game object representing the current state of the game\n prompter : object\n an object used to generate prompts for the GPT model\n parser : object\n an object used to parse the outputs of the GPT model\n\n Methods\n -------\n get_proposals(args, gpt, prompter, x, y, isFinished)\n Generates a set of proposals or possible solutions to the game.\n get_select_got(args, gpt, prompter, parser, x, ys, cache_value=True)\n Selects the best proposal from a set of proposals.\n solve(idx)\n Solves the game by generating proposals and selecting the best ones until the game is finished.\n \"\"\"\n def __init__(self, args, gpt, game, prompter, parser, to_print=False):\n \"\"\"Initializes the GoT_Solver with the given arguments.\"\"\"\n self.args = args\n self.gpt = gpt\n self.game = game\n self.prompter = prompter\n self.parser = parser\n self.to_print = to_print\n \n \n def get_proposals(self, args, gpt, prompter, x, y, isFinished):\n \"\"\"\n Generates a set of proposals or possible solutions to the game.\n\n Parameters:\n args (object): a configuration object with various parameters\n gpt (object): a GPT model used for generating proposals\n prompter (object): an object used to generate prompts for the GPT model\n x (object): the current state of the game\n y (object): the current output candidates\n isFinished (bool): a flag indicating whether the game is finished or not\n\n Returns:\n list: a list of proposals\n object: the current state of the game\n \"\"\"\n # Get instruction prompt\n instruct = prompter.get_instruction_prompt()\n # Get propose prompt and current state\n propose_prompt, current_state = prompter.propose_prompt_wrap(x, y, isFinished)\n # Generate proposals\n proposals = gpt(propose_prompt, instruct, n=args.param.n_generate_sample, stop=args.gpt.stop)[0]\n\n # Print propose prompt and proposals if in debug mode\n if self.to_print:\n print('propose_prompt', propose_prompt)\n print('proposals', proposals)\n\n # If game is finished, return proposals\n if isFinished:\n return [proposals + '\\n'], current_state\n else:\n # If game environment is 'game24', perform precheck on proposals\n if args.env == 'game24':\n 
proposals_precheck = proposals.strip().split('\\n')\n proposals_aftercheck = []\n for idx, p in enumerate(proposals_precheck):\n try:\n exp = p.lower().split(\"=\")[0].strip()\n terms = exp.split(' ')\n num1, _, num2 = terms[0], terms[1], terms[2]\n if num1 in current_state and num2 in current_state:\n res = eval(exp)\n proposals_aftercheck.append(p)\n except:\n continue\n else:\n # If game environment is not 'game24', split proposals\n proposals_aftercheck = proposals.replace('\\n', '').strip().split(', ')\n \n # Return proposals after check\n return [y + _ + '\\n' for _ in proposals_aftercheck], current_state\n\n\n def get_select_got(self, args, gpt, prompter, parser, x, ys, cache_value=True):\n \"\"\"\n Selects the best proposal from a set of proposals.\n\n Parameters:\n args (object): a configuration object with various parameters\n gpt (object): a GPT model used for generating selections\n prompter (object): an object used to generate prompts for the GPT model\n parser (object): an object used to parse the outputs of the GPT model\n x (object): the current state of the game\n ys (list): a list of proposals\n cache_value (bool): a flag indicating whether to cache the value or not\n\n Returns:\n list: a list of selected proposals\n \"\"\"\n # Get instruction prompt\n instruct = prompter.get_instruction_prompt()\n # Get select prompt\n select_prompt = prompter.select_prompt_wrap(x, ys, args.param.n_select_sample)\n # Generate select outputs\n select_outputs = gpt(select_prompt, instruct, n=args.param.n_generate_sample, stop=args.gpt.stop)\n # Unwrap select outputs and select the best one\n select = parser.select_outputs_unwrap(x, ys, select_outputs, args.multi_solution)\n\n # Print select prompt and select outputs if in debug mode\n if self.to_print:\n print('select_prompt', select_prompt)\n print('select_outputs', select_outputs)\n \n # Return selected proposals\n return select\n\n\n def solve(self, idx):\n \"\"\"\n Solves the game by generating proposals and selecting the best ones until the game is finished.\n\n Parameters:\n idx (int): the index of the game to be solved\n\n Returns:\n list: a list of final proposals\n dict: a dictionary of steps taken to solve the game\n \"\"\"\n # Get one test board from the game\n x = self.game.getOneTestBoard(idx)\n\n # Set total game step based on the game environment and whether multiple solutions are allowed\n if self.args.multi_solution and self.args.env == 'cube':\n total_game_step = self.args.task.total_game_step + 3\n elif self.args.multi_solution and self.args.env == 'npuzzle':\n total_game_step = self.args.task.total_game_step + 3\n elif not self.args.param.last_step and self.args.env == 'game24':\n total_game_step = self.args.task.total_game_step - 1\n else: \n total_game_step = self.args.task.total_game_step\n\n # Initialize current output candidates and other variables\n ys = [''] \n infos = []\n isFinished = False\n\n # Start solving the game\n for step in range(total_game_step+1):\n # Generation phase\n new_ys = []\n\n for y in ys:\n # Generate proposals\n ys_, current_state = self.get_proposals(self.args, self.gpt, self.prompter, x, y, isFinished)\n new_ys.append(ys_)\n \n new_ys = list(itertools.chain(*new_ys))\n\n # If game is finished, log information and break the loop\n if isFinished:\n infos.append({'step': step, 'x': str(x), 'thoughts': ys, 'answer': new_ys})\n ys = new_ys\n break\n\n # If there's no new candidates, it's impossible to reach the answer: early stop\n if len(new_ys) == 0:\n isFinished = True\n select_new_ys = 
ys[:min(self.args.param.n_select_sample, len(ys))]\n ys = select_new_ys\n infos.append({'step': step, 'x': str(x), 'ys': ys, 'new_ys': new_ys, 'select': [], 'select_new_ys': select_new_ys})\n continue\n \n # Evaluation phase\n # Select the best proposals\n select = self.get_select_got(self.args, self.gpt, self.prompter, self.parser, x, new_ys)\n\n # Print select and new proposals if in debug mode\n if self.to_print:\n print('select', select)\n print('ys', ys)\n print('new_ys', new_ys)\n\n # Preselect new proposals\n select_new_ys_pre = []\n for m in select:\n idx = m - 1\n select_new_ys_pre.append(new_ys[idx])\n \n # Select new proposals\n if len(select_new_ys_pre) > 0:\n select_new_ys = select_new_ys_pre[:min(self.args.param.n_select_sample, len(select_new_ys_pre))]\n else:\n select_new_ys = ys[:min(self.args.param.n_select_sample, len(ys))]\n\n # Log information\n if self.to_print: \n print('select_new_ys_pre', select_new_ys_pre)\n print('select_new_ys', select_new_ys)\n print(f'select --: {select}\\n-- choices --: {select_new_ys}\\n')\n \n infos.append({'step': step, 'x': str(x), 'ys': ys, 'new_ys': new_ys, 'select': select, 'select_new_ys': select_new_ys})\n ys = select_new_ys\n\n # Check if game is finished\n if self.args.env == 'game24':\n isFinished = step == total_game_step - 1\n else:\n moves = self.parser.extract_top_select(select_new_ys)\n # Condition to Stop: 1. One of the candiates Reach the Correct Answer; 2. Reach the maximum step; 3. only left impossible answer\n success = False\n for m in moves:\n success = success or self.game.isFinishing(x, m) \n isFinished = success or step == total_game_step - 1\n\n # Print final proposals if in debug mode\n if self.to_print: \n print(ys)\n # Return final proposals and steps taken to solve the game\n return ys, {'steps': infos}" }, { "identifier": "XoT_Solver", "path": "xot_all_in_one/xot/controller/solver/xot_solver.py", "snippet": "class XoT_Solver:\n \"\"\"\n The XoT_Solver class is designed to solve a variety of games using a combination of Monte Carlo Tree Search (MCTS), \n Neural Networks (NN), and a coaching mechanism. 
It supports both single and multiple solutions, and can revise its \n solutions based on feedback.\n\n Attributes:\n args: A configuration object containing various parameters.\n gpt: An instance of a GPT model for generating prompts.\n game: An instance of the game to be solved.\n prompter: An instance of a class for generating prompts.\n parser: An instance of a class for parsing actions and thoughts.\n nmcts: An instance of MCTS.\n c: An instance of a Coach.\n to_print: A boolean indicating whether to print debug information.\n \"\"\"\n def __init__(self, args, gpt, game, prompter, parser, to_print=False):\n \"\"\"\n Initializes the XoT_Solver with the given arguments, GPT model, game, prompter, parser, and print option.\n \"\"\"\n self.args = args\n self.gpt = gpt\n self.game = game\n self.prompter = prompter\n self.parser = parser\n self.nmcts, self.c = self.initial_xot(args)\n\n self.to_print = to_print\n \n def initial_xot(self, args):\n \"\"\"\n Initializes the Neural Network and MCTS based on the game environment specified in the arguments.\n \"\"\"\n if args.env.lower() == 'game24':\n from .pytorch_game24.NNet import NNetWrapper as nn\n elif args.env.lower() == 'cube':\n from .pytorch_cube.NNet import NNetWrapper as nn\n elif args.env.lower() == 'npuzzle':\n from .pytorch_npuzzle.NNet import NNetWrapper as nn\n else:\n raise ValueError\n \n nnet = nn(self.game)\n nnet.load_checkpoint(folder=self.args.model.checkpoint, filename=self.args.model.filename)\n nmcts = MCTS(self.game, nnet, args)\n c = Coach(self.game, nnet, args)\n return nmcts, c\n \n def multi_solve_before_revision(self, x):\n \"\"\"\n Solves the game for multiple solutions before any revisions are made.\n \"\"\"\n nmcts_modelcall_before = self.nmcts.getModelCall()\n player = lambda x: np.argmax(self.nmcts.getActionProb(x, temp=0, step=0))\n problem_state, getGameEnded, actions_idx, actions = self.c.generate_thoughts(x, player)\n actions_list, actions_candicates_list = [], []\n for i in range(self.args.xot.multi_solution_exploration):\n selected_ac_seq, _ = self.nmcts.inferSinglePlayer(problem_state, step=0, seed=i)\n if selected_ac_seq is not None:\n actions_candicates_list.append(str(selected_ac_seq))\n \n count = Counter(actions_candicates_list) \n actions_list = [ast.literal_eval(item) for item, _ in count.most_common(3)] \n\n nmcts_modelcall_after = self.nmcts.getModelCall()\n model_call_phase1 = nmcts_modelcall_after - nmcts_modelcall_before\n \n thoughts_list = []\n for actions in actions_list:\n try:\n thoughts_list.append(self.parser.action_to_thoughs(actions, x))\n except:\n continue\n if self.to_print:\n print('xot_solve -- thoughts: ', thoughts_list)\n \n prompt, _ = self.prompter.xot_prompt_multi_wrap(x, thoughts_list)\n instruct = self.prompter.get_instruction_prompt()\n samples = self.gpt(prompt, instruct, n=self.args.param.n_generate_sample, stop=self.args.gpt.stop)\n return samples, thoughts_list, actions, model_call_phase1\n \n\n def single_solve_before_revision(self, x):\n \"\"\"\n Solves the game for a single solution before any revisions are made.\n \"\"\"\n player = lambda x: np.argmax(self.nmcts.getActionProb(x, temp=0, step=0))\n nmcts_modelcall_before = self.nmcts.getModelCall()\n problem_state, getGameEnded, actions_idx, actions = self.c.generate_thoughts(x, player)\n nmcts_modelcall_after = self.nmcts.getModelCall()\n model_call_phase1 = nmcts_modelcall_after - nmcts_modelcall_before\n thoughts = self.parser.action_to_thoughs(actions, x)\n\n if self.to_print:\n print('xot_solve -- thoughts: 
', thoughts)\n prompt, _ = self.prompter.xot_prompt_wrap(x, thoughts)\n\n instruct = self.prompter.get_instruction_prompt()\n samples = self.gpt(prompt, instruct, n=self.args.param.n_generate_sample, stop=self.args.gpt.stop)\n \n return samples, thoughts, actions, model_call_phase1\n\n\n def solve_single_revision(self, x, samples, thoughts, actions, model_call_phase1, model_call_phase2):\n \"\"\"\n Revises a single solution based on feedback.\n \"\"\"\n instruct = self.prompter.get_instruction_prompt()\n for revise_count in range(self.args.xot.revise_times):\n infos = [self.parser.test_output(x, y, None) for y in samples]\n isCorrect = infos[0]['r']\n \n # Terminal Condition\n if isCorrect:\n model_call = model_call_phase1 + model_call_phase2\n if revise_count == 0:\n return samples, {}, False, None, [model_call, model_call_phase1, model_call_phase2]\n else:\n return samples, {}, revise_count, revised_state, [model_call, model_call_phase1, model_call_phase2]\n\n if not isCorrect:\n revised_prompt, _ = self.prompter.xot_prompt_revised_wrap(x, thoughts)\n revised_samples = self.gpt(revised_prompt, instruct, n=self.args.param.n_generate_sample, stop=self.args.gpt.stop)\n \n if self.to_print:\n print('revised_prompt', revised_prompt)\n print('revised_samples', revised_samples)\n \n if 'wrong' in revised_samples[0].lower() or 'incorrect' in revised_samples[0].lower():\n try:\n if 'all steps are wrong' in revised_samples[0].lower():\n incorrect_step = 1\n if self.to_print:\n print('all the steps are wrong')\n else:\n incorrect_step = int(revised_samples[0].split('is wrong')[0].strip().split(']')[0][-1])\n \n revised_state = self.parser.get_revised_state(x, thoughts, incorrect_step)\n \n if self.to_print:\n print('incorrect_step', incorrect_step)\n print('revised_state', revised_state)\n \n if self.args.env == 'game24':\n if incorrect_step > 1:\n ac_seq = actions_idx[:incorrect_step-1]\n state = x\n for i in range(len(ac_seq)):\n state, _ = self.game.getNextState(state, actions_idx[i])\n revised_state = state\n\n nmcts_modelcall_before = self.nmcts.getModelCall()\n player = lambda x: np.argmax(self.nmcts.getActionProb(x, temp=0, step=0))\n\n problem_state, getGameEnded, actions_idx, actions_revised = self.c.generate_thoughts(revised_state, player)\n nmcts_modelcall_after = self.nmcts.getModelCall()\n model_call_phase2 += nmcts_modelcall_after - nmcts_modelcall_before\n\n actions_after_revised = actions[:incorrect_step-1]\n actions_after_revised.extend(actions_revised)\n \n thoughts_revised = self.parser.action_to_thoughs(actions_after_revised, x)\n \n if self.to_print:\n print('actions_revised', actions_revised)\n print('actions_after_revised', actions_after_revised)\n print('thoughts_revised', thoughts_revised)\n print('xot_solve -- revised thoughts: ', thoughts_revised)\n\n prompt, _ = self.prompter.xot_prompt_wrap(x, thoughts_revised)\n instruct = self.prompter.get_instruction_prompt()\n samples = self.gpt(prompt, instruct, n=self.args.param.n_generate_sample, stop=self.args.gpt.stop)\n except:\n continue\n \n # after N revised times\n model_call = model_call_phase1 + model_call_phase2\n return samples, {}, revise_count+1, None, [model_call, model_call_phase1, model_call_phase2] \n\n\n def solve_multi_revision(self, x, samples, thoughts_list, actions, model_call_phase1, model_call_phase2_total):\n \"\"\"\n Revises multiple solutions based on feedback.\n \"\"\"\n instruct = self.prompter.get_instruction_prompt()\n infos = [self.parser.test_output_multi(x, y, [None, None, None]) for y in 
samples]\n isCorrect_list = infos[0]['r']\n \n if self.to_print:\n print('x', x)\n print('infos', infos)\n print('thoughts_list', thoughts_list)\n print('samples', samples)\n print('isCorrect_list', isCorrect_list)\n\n thoughts_revised_list = thoughts_list[:]\n revise_flags = [False] * len(isCorrect_list)\n\n for idx, r_ in enumerate(isCorrect_list):\n if idx >= len(thoughts_list): # It is posssible that gpt provides more ans than given thoughts\n break\n isCorrect = isCorrect_list[r_]['r'] \n if self.to_print:\n print('isCorrect', isCorrect)\n \n if not isCorrect:\n revise_flags[idx] = True\n thoughts = thoughts_list[idx]\n \n revised_prompt, _ = self.prompter.xot_prompt_revised_wrap(x, thoughts)\n revised_samples = self.gpt(revised_prompt, instruct, n=self.args.param.n_generate_sample, stop=self.args.gpt.stop)\n \n if self.to_print:\n print('thoughts', thoughts)\n print('revised_prompt', revised_prompt)\n print('revised_samples', revised_samples)\n \n if 'wrong' in revised_samples[0].lower()or 'incorrect' in revised_samples[0].lower():\n try:\n if 'all steps are wrong' in revised_samples[0].lower():\n incorrect_step = 1\n if self.to_print:\n print('all steps are wrong')\n else:\n incorrect_step = int(revised_samples[0].split('is wrong')[0].strip().split(']')[0][-1])\n \n revised_state = self.parser.get_revised_state(x, thoughts, incorrect_step)\n if self.to_print:\n print('incorrect_step', incorrect_step)\n print('revised_state', revised_state)\n\n if self.args.env == 'game24':\n if incorrect_step > 1:\n ac_seq = actions_idx[:incorrect_step-1]\n state = x\n for i in range(len(ac_seq)):\n state, _ = self.game.getNextState(state, actions_idx[i])\n revised_state = state\n \n nmcts_modelcall_before = self.nmcts.getModelCall()\n player = lambda x: np.argmax(self.nmcts.getActionProb(x, temp=0, step=0))\n problem_state, getGameEnded, actions_idx, actions_revised = self.c.generate_thoughts(revised_state, player)\n nmcts_modelcall_after = self.nmcts.getModelCall()\n model_call_phase2_total += nmcts_modelcall_after - nmcts_modelcall_before\n \n actions_after_revised = actions[:incorrect_step-1]\n actions_after_revised.extend(actions_revised)\n \n thoughts_revised = self.parser.action_to_thoughs(actions_after_revised, x)\n thoughts_revised_list[idx] = thoughts_revised\n\n if self.to_print:\n print('actions_revised', actions_revised)\n print('actions_after_revised', actions_after_revised)\n print('thoughts_revised', thoughts_revised)\n \n except:\n model_call_phase2_total += 0\n else:\n model_call_phase2_total += 0\n\n prompt, _ = self.prompter.xot_prompt_multi_wrap(x, thoughts_revised_list)\n instruct = self.prompter.get_instruction_prompt()\n samples_revised = self.gpt(prompt, instruct, n=self.args.param.n_generate_sample, stop=self.args.gpt.stop)\n model_call = model_call_phase1 + model_call_phase2_total\n return samples_revised, {}, revise_flags, None, [model_call, model_call_phase1, model_call_phase2_total]\n \n\n def solve(self, idx):\n \"\"\"\n The main method that solves the game. 
It first generates solutions, then revises them if necessary.\n \"\"\"\n x = self.game.getOneTestBoard(idx)\n self.nmcts.reset()\n model_call_phase1, model_call_phase2 = 0, 0\n\n # Load Config\n self.c.game.total_game_step = self.args.task.total_game_step\n if self.args.multi_solution:\n self.nmcts.args.numMCTSSims = self.args.xot.multi_numMCTSSims\n else:\n self.nmcts.args.numMCTSSims = self.args.xot.numMCTSSims\n \n if not self.args.multi_solution:\n samples, thoughts, actions, model_call_phase1 = self.single_solve_before_revision(x)\n else:\n samples, thoughts_list, actions, model_call_phase1 = self.multi_solve_before_revision(x)\n \n if not self.args.xot.revised:\n model_call = model_call_phase1 + model_call_phase2\n return samples, {}, False, None, [model_call, model_call_phase1, model_call_phase2]\n else: \n # Update Config For Revision\n self.c.game.total_game_step = self.args.xot.revise_total_game_step\n self.nmcts.args.numMCTSSims = self.args.xot.revise_numMCTSSims\n if self.args.xot.revised and not self.args.multi_solution:\n return self.solve_single_revision(x, samples, thoughts, actions, model_call_phase1, model_call_phase2)\n \n if self.args.xot.revised and self.args.multi_solution:\n return self.solve_multi_revision(x, samples, thoughts_list, actions, model_call_phase1, model_call_phase2)" } ]
import os
import json
import itertools
import random
import ast
import re
import numpy as np
import pandas as pd
from collections import Counter
from .utils import *
from .solver.io_solver import IO_Solver
from .solver.cot_solver import CoT_Solver
from .solver.tot_solver import ToT_Solver
from .solver.got_solver import GoT_Solver
from .solver.xot_solver import XoT_Solver
9,493
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

class Controller:
    """
    Controller class to manage the execution flow
    This involves language models, operations, prompting, and parsing.
    """
    def __init__(self, config, gpt, game, prompter, parser):
        self.config = config
        self.gpt = gpt
        self.game = game
        self.prompter = prompter
        self.parser = parser

    def initial_logs(self, config):
        if config.method == 'io' or config.method == 'cot':
            file = f'logs/{config.env}/{config.gpt.backend}_{config.gpt.temperature}_{config.method}_sample{config.param.n_generate_sample}_multi{config.multi_solution}_start{config.task.task_start_index}_end{config.task.task_end_index}.json'
        elif config.method == 'tot':
            file = f'logs/{config.env}/{config.gpt.backend}_{config.gpt.temperature}_{config.method}_propose{config.param.n_generate_sample}_value{config.param.n_evaluate_sample}_greedy{config.param.n_select_sample}_start{config.task.task_start_index}_end{config.task.task_end_index}_laststep{config.param.last_step}.json'
        elif config.method == 'got':
            file = f'logs/{config.env}/{config.gpt.backend}_{config.gpt.temperature}_{config.method}_multi{config.multi_solution}_propose{config.param.n_generate_sample}_value{config.param.n_evaluate_sample}_greedy{config.param.n_select_sample}_start{config.task.task_start_index}_end{config.task.task_end_index}_laststep{config.param.last_step}.json'
        elif config.method == 'xot':
            file = f'logs/{config.env}/{config.gpt.backend}_{config.gpt.temperature}_{config.method}_multi{config.multi_solution}_revised{config.xot.revised}_reviseTimes{config.xot.revise_times}_start{config.task.task_start_index}_end{config.task.task_end_index}_laststep{config.param.last_step}.json'
        else:
            raise ValueError("invalid method")
        os.makedirs(os.path.dirname(file), exist_ok=True)
        return file

    def initial_solver(self, config):
        if config.method == 'io':
            return IO_Solver(config, self.gpt, self.game, self.prompter, self.parser)
        elif config.method == 'cot':
            return CoT_Solver(config, self.gpt, self.game, self.prompter, self.parser)
        elif config.method == 'tot':
return ToT_Solver(config, self.gpt, self.game, self.prompter, self.parser)
2
2023-11-08 09:48:34+00:00
12k
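To show how the fields of the record above relate to each other, here is a small sketch. `row` is assumed to be this record parsed into a Python dict, and the helper names are invented for illustration rather than taken from any library.

```python
# Sketch only: `row` is assumed to hold the record above, parsed into a dict.

def gold_context_identifier(row: dict) -> str:
    """Identifier of the context snippet selected by gold_snippet_index."""
    return row["context"][row["gold_snippet_index"]]["identifier"]

def next_line_exact_match(row: dict, prediction: str) -> bool:
    """Compare a predicted completion against the reference next_line."""
    return prediction.strip() == row["next_line"].strip()

# For this record, gold_snippet_index is 2, which selects the ToT_Solver
# snippet from the context list, and next_line is the statement that
# constructs that solver:
#     return ToT_Solver(config, self.gpt, self.game, self.prompter, self.parser)
```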
UMass-Foundation-Model/CoVLM
transformers/src/transformers/models/opt/modeling_tf_opt.py
[ { "identifier": "get_tf_activation", "path": "transformers/src/transformers/activations_tf.py", "snippet": "def get_tf_activation(activation_string):\n if activation_string in ACT2FN:\n return ACT2FN[activation_string]\n else:\n raise KeyError(f\"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}\")" }, { "identifier": "TFBaseModelOutputWithPast", "path": "transformers/src/transformers/modeling_tf_outputs.py", "snippet": "class TFBaseModelOutputWithPast(ModelOutput):\n \"\"\"\n Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).\n\n Args:\n last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n\n If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,\n hidden_size)` is output.\n past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,\n sequence_length, embed_size_per_head)`).\n\n Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see\n `past_key_values` input) to speed up sequential decoding.\n hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n last_hidden_state: tf.Tensor = None\n past_key_values: List[tf.Tensor] | None = None\n hidden_states: Tuple[tf.Tensor] | None = None\n attentions: Tuple[tf.Tensor] | None = None" }, { "identifier": "TFCausalLMOutputWithPast", "path": "transformers/src/transformers/modeling_tf_outputs.py", "snippet": "class TFCausalLMOutputWithPast(ModelOutput):\n \"\"\"\n Base class for causal language model (or autoregressive) outputs.\n\n Args:\n loss (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `labels` is provided):\n Language modeling loss (for next-token prediction).\n logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,\n sequence_length, embed_size_per_head)`).\n\n Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see\n `past_key_values` input) to speed up sequential decoding.\n hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `tf.Tensor` (one 
for the output of the embeddings + one for the output of each layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: tf.Tensor | None = None\n logits: tf.Tensor = None\n past_key_values: List[tf.Tensor] | None = None\n hidden_states: Tuple[tf.Tensor] | None = None\n attentions: Tuple[tf.Tensor] | None = None" }, { "identifier": "TFCausalLanguageModelingLoss", "path": "transformers/src/transformers/modeling_tf_utils.py", "snippet": "def dummy_loss(y_true, y_pred):\n def num_parameters(self, only_trainable: bool = False) -> int:\ndef keras_serializable(cls):\n def wrapped_init(self, *args, **kwargs):\n def get_config(self):\n def hf_compute_loss(self, labels, logits):\n def hf_compute_loss(self, labels, logits):\n def hf_compute_loss(self, labels, logits):\n def hf_compute_loss(self, labels, logits):\n def hf_compute_loss(self, labels, logits):\n def hf_compute_loss(self, labels, logits):\ndef booleans_processing(config, **kwargs):\ndef unpack_inputs(func):\n def run_call_with_unpacked_inputs(self, *args, **kwargs):\ndef input_processing(func, config, **kwargs):\ndef dtype_byte_size(dtype):\ndef format_weight_name(name, _prefix=None):\ndef tf_shard_checkpoint(weights, max_shard_size=\"10GB\"):\ndef load_tf_sharded_weights(model, shard_files, ignore_mismatched_sizes=False, strict=False, _prefix=None):\ndef load_tf_shard(model, model_layer_map, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):\ndef load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):\ndef load_tf_weights_from_h5(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):\ndef load_tf_weights_from_safetensors(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):\ndef init_copy_embeddings(old_embeddings, new_num_tokens):\n def dummy_inputs(self) -> Dict[str, tf.Tensor]:\n def framework(self) -> str:\n def build(self, input_shape=None):\n def __init__(self, config, *inputs, **kwargs):\n def get_config(self):\n def from_config(cls, config, **kwargs):\n def _from_config(cls, config, **kwargs):\n def get_head_mask(self, head_mask: tf.Tensor | None, num_hidden_layers: int) -> tf.Tensor:\n def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers):\n def serving(self, inputs):\n def eager_serving(self, inputs):\n def input_signature(self) -> Dict[str, tf.TensorSpec]:\n def serving_output(self, output):\n def can_generate(cls) -> bool:\n def get_input_embeddings(self) -> tf.keras.layers.Layer:\n def _save_checkpoint(self, checkpoint_dir, epoch):\n def load_repo_checkpoint(self, repo_path_or_name):\n def prepare_tf_dataset(\n self,\n dataset: \"datasets.Dataset\", # noqa:F821\n batch_size: int = 8,\n shuffle: bool = True,\n tokenizer: Optional[\"PreTrainedTokenizerBase\"] = None,\n collate_fn: Optional[Callable] = None,\n collate_fn_args: Optional[Dict[str, Any]] = None,\n drop_remainder: Optional[bool] = None,\n prefetch: bool = True,\n ):\n def compile(\n self,\n optimizer=\"rmsprop\",\n loss=\"auto_with_warning\",\n metrics=None,\n 
loss_weights=None,\n weighted_metrics=None,\n run_eagerly=None,\n steps_per_execution=None,\n **kwargs,\n ):\n def compute_loss(self, *args, **kwargs):\n def get_label_to_output_name_mapping(self):\n def train_step(self, data):\n def test_step(self, data):\n def create_model_card(\n self,\n output_dir,\n model_name: str,\n language: Optional[str] = None,\n license: Optional[str] = None,\n tags: Optional[str] = None,\n finetuned_from: Optional[str] = None,\n tasks: Optional[str] = None,\n dataset_tags: Optional[Union[str, List[str]]] = None,\n dataset: Optional[Union[str, List[str]]] = None,\n dataset_args: Optional[Union[str, List[str]]] = None,\n ):\n def set_input_embeddings(self, value):\n def get_output_embeddings(self) -> Union[None, tf.keras.layers.Layer]:\n def set_output_embeddings(self, value):\n def get_output_layer_with_bias(self) -> Union[None, tf.keras.layers.Layer]:\n def get_prefix_bias_name(self) -> Union[None, str]:\n def get_bias(self) -> Union[None, Dict[str, tf.Variable]]:\n def set_bias(self, value):\n def get_lm_head(self) -> tf.keras.layers.Layer:\n def resize_token_embeddings(\n self, new_num_tokens: Optional[int] = None\n ) -> Union[tf.keras.layers.Embedding, tf.Variable]:\n def _v2_resized_token_embeddings(self, new_num_tokens: Optional[int] = None) -> tf.keras.layers.Embedding:\n def _get_word_embedding_weight(model, embedding_layer):\n def _resize_token_embeddings(self, new_num_tokens):\n def _v2_resize_token_embeddings(self, new_num_tokens):\n def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens):\n def _v2_get_resized_lm_head_bias(\n self, old_lm_head_bias: Dict[str, tf.Variable], new_num_tokens: int\n ) -> Dict[str, tf.Tensor]:\n def _get_resized_lm_head_decoder(self, old_lm_head_decoder, new_num_tokens):\n def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Variable:\n def _v2_get_resized_embeddings(\n self, old_embeddings: tf.keras.layers.Embedding, new_num_tokens: int\n ) -> tf.keras.layers.Embedding:\n def prune_heads(self, heads_to_prune):\n def save_pretrained(\n self,\n save_directory,\n saved_model=False,\n version=1,\n push_to_hub=False,\n signatures=None,\n max_shard_size: Union[int, str] = \"10GB\",\n create_pr: bool = False,\n safe_serialization: bool = False,\n token: Optional[Union[str, bool]] = None,\n **kwargs,\n ):\n def from_pretrained(\n cls,\n pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],\n *model_args,\n config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None,\n cache_dir: Optional[Union[str, os.PathLike]] = None,\n ignore_mismatched_sizes: bool = False,\n force_download: bool = False,\n local_files_only: bool = False,\n token: Optional[Union[str, bool]] = None,\n revision: str = \"main\",\n **kwargs,\n ):\n def push_to_hub(\n self,\n repo_id: str,\n use_temp_dir: Optional[bool] = None,\n commit_message: Optional[str] = None,\n private: Optional[bool] = None,\n max_shard_size: Optional[Union[int, str]] = \"10GB\",\n token: Optional[Union[bool, str]] = None,\n # (`use_auth_token` is deprecated: we have to keep it here as we don't have **kwargs)\n use_auth_token: Optional[Union[bool, str]] = None,\n create_pr: bool = False,\n **base_model_card_args,\n ) -> str:\n def register_for_auto_class(cls, auto_class=\"TFAutoModel\"):\n def __init__(self, nf, nx, initializer_range=0.02, **kwargs):\n def build(self, input_shape):\n def call(self, x):\n def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float] = None, **kwargs):\n def build(self, 
input_shape):\n def get_config(self):\n def call(self, inputs: tf.Tensor, mode: str = \"embedding\") -> tf.Tensor:\n def _embedding(self, input_ids):\n def _linear(self, inputs):\n def __init__(self, config: PretrainedConfig, initializer_range: float = 0.02, **kwargs):\n def call(self, inputs, cls_index=None, training=False):\ndef get_initializer(initializer_range: float = 0.02) -> tf.keras.initializers.TruncatedNormal:\nclass TFModelUtilsMixin:\nclass TFCausalLanguageModelingLoss:\nclass TFQuestionAnsweringLoss:\nclass TFTokenClassificationLoss:\nclass TFSequenceClassificationLoss:\nclass TFMultipleChoiceLoss:\nclass TFMaskedLanguageModelingLoss(TFCausalLanguageModelingLoss):\nclass TFNextSentencePredictionLoss:\nclass TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, PushToHubMixin):\nclass TFConv1D(tf.keras.layers.Layer):\nclass TFSharedEmbeddings(tf.keras.layers.Layer):\nclass TFSequenceSummary(tf.keras.layers.Layer):" }, { "identifier": "check_embeddings_within_bounds", "path": "transformers/src/transformers/tf_utils.py", "snippet": "def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = \"input_ids\") -> None:\n \"\"\"\n `tf.gather`, on which TF embedding layers are based, won't check positive out of bound indices on GPU, returning\n zeros instead. This function adds a check against that dangerous silent behavior.\n\n Args:\n tensor (`tf.Tensor`): The tensor of indices to check.\n embed_dim (`int`): The embedding dimension.\n tensor_name (`str`, *optional*): The name of the tensor to use in the error message.\n \"\"\"\n tf.debugging.assert_less(\n tensor,\n tf.cast(embed_dim, dtype=tensor.dtype),\n message=(\n f\"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding \"\n f\"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time.\"\n ),\n )" }, { "identifier": "shape_list", "path": "transformers/src/transformers/tf_utils.py", "snippet": "def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:\n \"\"\"\n Deal with dynamic shape in tensorflow cleanly.\n\n Args:\n tensor (`tf.Tensor` or `np.ndarray`): The tensor we want the shape of.\n\n Returns:\n `List[int]`: The shape of the tensor as a list.\n \"\"\"\n if isinstance(tensor, np.ndarray):\n return list(tensor.shape)\n\n dynamic = tf.shape(tensor)\n\n if tensor.shape == tf.TensorShape(None):\n return dynamic\n\n static = tensor.shape.as_list()\n\n return [dynamic[i] if s is None else s for i, s in enumerate(static)]" }, { "identifier": "stable_softmax", "path": "transformers/src/transformers/tf_utils.py", "snippet": "def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:\n \"\"\"\n Stable wrapper that returns the same output as `tf.nn.softmax`, but that works reliably with XLA on CPU. It is\n meant as a workaround for the [following issue](https://github.com/tensorflow/tensorflow/issues/55682), and will be\n removed after it gets fixed. The arguments and outputs are the same as `tf.nn.softmax`, and relies on the fact that\n `softmax(x) = softmax(x + c)` (see https://ogunlao.github.io/2020/04/26/you_dont_really_know_softmax.html).\n\n Args:\n logits (`tf.Tensor`):\n Must be one of the following types: half, float32, float64.\n axis (`int`, *optional*):\n The dimension softmax would be performed on. 
The default is -1 which indicates the last dimension.\n name (`str`, *optional*):\n A name for the operation.\n\n Returns:\n `tf.Tensor`:\n A Tensor. Has the same type and shape as logits.\n \"\"\"\n # TODO: When the issue linked above gets sorted, add a check on TF version here and use the original function if\n # it has the fix. After we drop the support for unfixed versions, remove this function.\n return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)" }, { "identifier": "logging", "path": "transformers/src/transformers/utils/logging.py", "snippet": "def _get_default_logging_level():\ndef _get_library_name() -> str:\ndef _get_library_root_logger() -> logging.Logger:\ndef _configure_library_root_logger() -> None:\ndef _reset_library_root_logger() -> None:\ndef get_log_levels_dict():\ndef get_logger(name: Optional[str] = None) -> logging.Logger:\ndef get_verbosity() -> int:\ndef set_verbosity(verbosity: int) -> None:\ndef set_verbosity_info():\ndef set_verbosity_warning():\ndef set_verbosity_debug():\ndef set_verbosity_error():\ndef disable_default_handler() -> None:\ndef enable_default_handler() -> None:\ndef add_handler(handler: logging.Handler) -> None:\ndef remove_handler(handler: logging.Handler) -> None:\ndef disable_propagation() -> None:\ndef enable_propagation() -> None:\ndef enable_explicit_format() -> None:\ndef reset_format() -> None:\ndef warning_advice(self, *args, **kwargs):\ndef warning_once(self, *args, **kwargs):\n def __init__(self, *args, **kwargs): # pylint: disable=unused-argument\n def __iter__(self):\n def __getattr__(self, _):\n def empty_fn(*args, **kwargs): # pylint: disable=unused-argument\n def __enter__(self):\n def __exit__(self, type_, value, traceback):\n def __call__(self, *args, **kwargs):\n def set_lock(self, *args, **kwargs):\n def get_lock(self):\ndef is_progress_bar_enabled() -> bool:\ndef enable_progress_bar():\ndef disable_progress_bar():\nclass EmptyTqdm:\nclass _tqdm_cls:" }, { "identifier": "add_code_sample_docstrings", "path": "transformers/src/transformers/utils/doc.py", "snippet": "def add_code_sample_docstrings(\n *docstr,\n processor_class=None,\n checkpoint=None,\n output_type=None,\n config_class=None,\n mask=\"[MASK]\",\n qa_target_start_index=14,\n qa_target_end_index=15,\n model_cls=None,\n modality=None,\n expected_output=None,\n expected_loss=None,\n real_checkpoint=None,\n):\n def docstring_decorator(fn):\n # model_class defaults to function's class if not specified otherwise\n model_class = fn.__qualname__.split(\".\")[0] if model_cls is None else model_cls\n\n if model_class[:2] == \"TF\":\n sample_docstrings = TF_SAMPLE_DOCSTRINGS\n elif model_class[:4] == \"Flax\":\n sample_docstrings = FLAX_SAMPLE_DOCSTRINGS\n else:\n sample_docstrings = PT_SAMPLE_DOCSTRINGS\n\n # putting all kwargs for docstrings in a dict to be used\n # with the `.format(**doc_kwargs)`. 
Note that string might\n # be formatted with non-existing keys, which is fine.\n doc_kwargs = {\n \"model_class\": model_class,\n \"processor_class\": processor_class,\n \"checkpoint\": checkpoint,\n \"mask\": mask,\n \"qa_target_start_index\": qa_target_start_index,\n \"qa_target_end_index\": qa_target_end_index,\n \"expected_output\": expected_output,\n \"expected_loss\": expected_loss,\n \"real_checkpoint\": real_checkpoint,\n \"fake_checkpoint\": checkpoint,\n \"true\": \"{true}\", # For <Tip warning={true}> syntax that conflicts with formatting.\n }\n\n if (\"SequenceClassification\" in model_class or \"AudioClassification\" in model_class) and modality == \"audio\":\n code_sample = sample_docstrings[\"AudioClassification\"]\n elif \"SequenceClassification\" in model_class:\n code_sample = sample_docstrings[\"SequenceClassification\"]\n elif \"QuestionAnswering\" in model_class:\n code_sample = sample_docstrings[\"QuestionAnswering\"]\n elif \"TokenClassification\" in model_class:\n code_sample = sample_docstrings[\"TokenClassification\"]\n elif \"MultipleChoice\" in model_class:\n code_sample = sample_docstrings[\"MultipleChoice\"]\n elif \"MaskedLM\" in model_class or model_class in [\"FlaubertWithLMHeadModel\", \"XLMWithLMHeadModel\"]:\n code_sample = sample_docstrings[\"MaskedLM\"]\n elif \"LMHead\" in model_class or \"CausalLM\" in model_class:\n code_sample = sample_docstrings[\"LMHead\"]\n elif \"CTC\" in model_class:\n code_sample = sample_docstrings[\"CTC\"]\n elif \"AudioFrameClassification\" in model_class:\n code_sample = sample_docstrings[\"AudioFrameClassification\"]\n elif \"XVector\" in model_class and modality == \"audio\":\n code_sample = sample_docstrings[\"AudioXVector\"]\n elif \"Model\" in model_class and modality == \"audio\":\n code_sample = sample_docstrings[\"SpeechBaseModel\"]\n elif \"Model\" in model_class and modality == \"vision\":\n code_sample = sample_docstrings[\"VisionBaseModel\"]\n elif \"Model\" in model_class or \"Encoder\" in model_class:\n code_sample = sample_docstrings[\"BaseModel\"]\n elif \"ImageClassification\" in model_class:\n code_sample = sample_docstrings[\"ImageClassification\"]\n else:\n raise ValueError(f\"Docstring can't be built for model {model_class}\")\n\n code_sample = filter_outputs_from_example(\n code_sample, expected_output=expected_output, expected_loss=expected_loss\n )\n if real_checkpoint is not None:\n code_sample = FAKE_MODEL_DISCLAIMER + code_sample\n func_doc = (fn.__doc__ or \"\") + \"\".join(docstr)\n output_doc = \"\" if output_type is None else _prepare_output_docstrings(output_type, config_class)\n built_doc = code_sample.format(**doc_kwargs)\n fn.__doc__ = func_doc + output_doc + built_doc\n return fn\n\n return docstring_decorator" }, { "identifier": "add_start_docstrings", "path": "transformers/src/transformers/utils/doc.py", "snippet": "def add_start_docstrings(*docstr):\n def docstring_decorator(fn):\n fn.__doc__ = \"\".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else \"\")\n return fn\n\n return docstring_decorator" }, { "identifier": "add_start_docstrings_to_model_forward", "path": "transformers/src/transformers/utils/doc.py", "snippet": "def add_start_docstrings_to_model_forward(*docstr):\n def docstring_decorator(fn):\n docstring = \"\".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else \"\")\n class_name = f\"[`{fn.__qualname__.split('.')[0]}`]\"\n intro = f\" The {class_name} forward method, overrides the `__call__` special method.\"\n note = r\"\"\"\n\n <Tip>\n\n Although the 
recipe for forward pass needs to be defined within this function, one should call the [`Module`]\n instance afterwards instead of this since the former takes care of running the pre and post processing steps while\n the latter silently ignores them.\n\n </Tip>\n\"\"\"\n\n fn.__doc__ = intro + note + docstring\n return fn\n\n return docstring_decorator" }, { "identifier": "replace_return_docstrings", "path": "transformers/src/transformers/utils/doc.py", "snippet": "def replace_return_docstrings(output_type=None, config_class=None):\n def docstring_decorator(fn):\n func_doc = fn.__doc__\n lines = func_doc.split(\"\\n\")\n i = 0\n while i < len(lines) and re.search(r\"^\\s*Returns?:\\s*$\", lines[i]) is None:\n i += 1\n if i < len(lines):\n indent = len(_get_indent(lines[i]))\n lines[i] = _prepare_output_docstrings(output_type, config_class, min_indent=indent)\n func_doc = \"\\n\".join(lines)\n else:\n raise ValueError(\n f\"The function {fn} should have an empty 'Return:' or 'Returns:' in its docstring as placeholder, \"\n f\"current docstring is:\\n{func_doc}\"\n )\n fn.__doc__ = func_doc\n return fn\n\n return docstring_decorator" }, { "identifier": "OPTConfig", "path": "transformers/src/transformers/models/opt/configuration_opt.py", "snippet": "class OPTConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`OPTModel`]. It is used to instantiate a OPT model\n according to the specified arguments, defining the model architecture. Instantiating a configuration with the\n defaults will yield a similar configuration to that of the OPT\n [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n\n Args:\n vocab_size (`int`, *optional*, defaults to 50272):\n Vocabulary size of the OPT model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`OPTModel`]\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the layers and the pooler layer.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of decoder layers.\n ffn_dim (`int`, *optional*, defaults to 3072):\n Dimensionality of the \"intermediate\" (often named feed-forward) layer in decoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer decoder.\n activation_function (`str` or `function`, *optional*, defaults to `\"relu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"silu\"` and `\"gelu_new\"` are supported.\n max_position_embeddings (`int`, *optional*, defaults to 2048):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n do_layer_norm_before (`bool`, *optional*, defaults to `True`):\n Whether to perform layer normalization before the attention block.\n word_embed_proj_dim (`int`, *optional*):\n `word_embed_proj_dim` can be set to down-project word embeddings, *e.g.* `opt-350m`. 
Defaults to\n `hidden_size`.\n dropout (`float`, *optional*, defaults to 0.1):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n layerdrop (`float`, *optional*, defaults to 0.0):\n The LayerDrop probability. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more\n details.\n init_std (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n enable_bias (`bool`, *optional*, defaults to `True`):\n Whether or not if the linear layers in the attention blocks should use the bias term.\n layer_norm_elementwise_affine (`bool`, *optional*, defaults to `True`):\n Whether or not if the layer norms should have learnable parameters.\n\n Example:\n\n ```python\n >>> from transformers import OPTConfig, OPTModel\n\n >>> # Initializing a OPT facebook/opt-large style configuration\n >>> configuration = OPTConfig()\n\n >>> # Initializing a model (with random weights) from the facebook/opt-large style configuration\n >>> model = OPTModel(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n ```\"\"\"\n model_type = \"opt\"\n keys_to_ignore_at_inference = [\"past_key_values\"]\n\n def __init__(\n self,\n vocab_size=50272,\n hidden_size=768,\n num_hidden_layers=12,\n ffn_dim=3072,\n max_position_embeddings=2048,\n do_layer_norm_before=True,\n _remove_final_layer_norm=False,\n word_embed_proj_dim=None,\n dropout=0.1,\n attention_dropout=0.0,\n num_attention_heads=12,\n activation_function=\"relu\",\n layerdrop=0.0,\n init_std=0.02,\n use_cache=True,\n pad_token_id=1,\n bos_token_id=2,\n eos_token_id=2,\n enable_bias=True,\n layer_norm_elementwise_affine=True,\n **kwargs,\n ):\n super().__init__(\n pad_token_id=pad_token_id,\n bos_token_id=bos_token_id,\n eos_token_id=eos_token_id,\n **kwargs,\n )\n self.vocab_size = vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.num_attention_heads = num_attention_heads\n self.word_embed_proj_dim = word_embed_proj_dim if word_embed_proj_dim is not None else hidden_size\n self.ffn_dim = ffn_dim\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.dropout = dropout\n self.attention_dropout = attention_dropout\n self.activation_function = activation_function\n self.init_std = init_std\n self.layerdrop = layerdrop\n self.use_cache = use_cache\n self.do_layer_norm_before = do_layer_norm_before\n # We keep these variables at `True` for backward compatibility.\n self.enable_bias = enable_bias\n self.layer_norm_elementwise_affine = layer_norm_elementwise_affine\n\n # Note that the only purpose of `_remove_final_layer_norm` is to keep backward compatibility\n # with checkpoints that have been fine-tuned before transformers v4.20.1\n # see https://github.com/facebookresearch/metaseq/pull/164\n self._remove_final_layer_norm = _remove_final_layer_norm" } ]
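The context above bundles several docstring helpers from the transformers doc utilities. As a quick illustration, add_start_docstrings (copied verbatim from the snippet above) simply prepends shared text to the wrapped function's __doc__; the decorated function and strings below are invented for the example:

def add_start_docstrings(*docstr):
    def docstring_decorator(fn):
        fn.__doc__ = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
        return fn
    return docstring_decorator

@add_start_docstrings("Shared intro for all OPT heads. ", "See OPT_START_DOCSTRING for details. ")
def call(inputs):
    """Head-specific arguments are documented here."""
    return inputs

print(call.__doc__)
# Shared intro for all OPT heads. See OPT_START_DOCSTRING for details. Head-specific arguments are documented here.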
from typing import Optional, Tuple, Union from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutputWithPast, TFCausalLMOutputWithPast from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, TFSharedEmbeddings, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_opt import OPTConfig import numpy as np import tensorflow as tf
token_num: 9,912
) @add_start_docstrings( "The bare TF OPT Model outputting raw hidden-states without any specific head on top.", OPT_START_DOCSTRING, ) @keras_serializable class TFOPTModel(TFOPTPreTrainedModel): config_class = OPTConfig def __init__(self, config: OPTConfig, **kwargs): super().__init__(config, **kwargs) self.config = config self.model = TFOPTMainLayer(config, name="model") def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, new_embeddings): self.model.set_input_embeddings(new_embeddings) @unpack_inputs @add_start_docstrings_to_model_forward(OPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC, expected_output=_EXPECTED_OUTPUT_SHAPE, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, **kwargs, ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model( input_ids, attention_mask=attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) if not return_dict: return outputs return TFBaseModelOutputWithPast( last_hidden_state=outputs.last_hidden_state, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFBaseModelOutputWithPast( last_hidden_state=output.last_hidden_state, past_key_values=pkv, hidden_states=hs, attentions=attns, ) @add_start_docstrings( """ The OPT Model transformer with a language modeling head on top. 
""", OPT_START_DOCSTRING, ) @keras_serializable class TFOPTForCausalLM(TFOPTPreTrainedModel, TFCausalLanguageModelingLoss): config_class = OPTConfig def __init__(self, config: OPTConfig, **kwargs): super().__init__(config, **kwargs) self.config = config self.model = TFOPTMainLayer(config, name="model") def get_output_embeddings(self): return self.model.get_input_embeddings() def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs): attention_mask = kwargs.get("attention_mask", None) # only last token for inputs_ids if past is defined in kwargs if past_key_values: inputs = tf.expand_dims(inputs[:, -1], -1) return { "input_ids": inputs, "attention_mask": attention_mask, "past_key_values": past_key_values, "use_cache": use_cache, } @unpack_inputs
# coding=utf-8 # Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 OPT model.""" from __future__ import annotations # Public API logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "facebook/opt-350m" _CONFIG_FOR_DOC = "OPTConfig" # Base model docstring _EXPECTED_OUTPUT_SHAPE = [1, 8, 1024] # Causal LM output _CAUSAL_LM_EXPECTED_OUTPUT = ( "Hey, are you conscious? Can you talk to me?\nI'm not conscious. I'm just a little bit of a weirdo." ) LARGE_NEGATIVE = -1e8 def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0): """ Make causal mask used for bi-directional self-attention. """ bsz = input_ids_shape[0] tgt_len = input_ids_shape[1] # We need triu with k = 1 but TF expects known compile-time dims for that, so we hack around it mask = tf.fill((tgt_len, tgt_len), tf.cast(LARGE_NEGATIVE, tf.float32)) mask = tf.linalg.band_part(mask, 0, -1) - tf.linalg.band_part(mask, 0, 0) if past_key_values_length > 0: mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1) return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1)) # Copied from transformers.models.bart.modeling_tf_bart._expand_mask def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE class TFOPTLearnedPositionalEmbedding(tf.keras.layers.Embedding): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs): # OPT is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. 
Other models don't have this hack self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim, **kwargs) def call(self, attention_mask, past_key_values_length: int = 0): """`input_ids_shape` is expected to be [bsz x seqlen].""" attention_mask = tf.cast(attention_mask, tf.int64) # create positions depending on attention_mask positions = tf.math.cumsum(attention_mask, axis=1) * attention_mask - 1 # cut positions if `past_key_values_length` is > 0 positions = positions[:, past_key_values_length:] return super().call(positions + self.offset) # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->OPT class TFOPTAttention(tf.keras.layers.Layer): """Multi-headed attention from "Attention Is All You Need""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, **kwargs, ): super().__init__(**kwargs) self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = tf.keras.layers.Dropout(dropout) self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj") self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj") self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj") self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj") def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) def call( self, hidden_states: tf.Tensor, key_value_states: tf.Tensor | None = None, past_key_value: Tuple[Tuple[tf.Tensor]] | None = None, attention_mask: tf.Tensor | None = None, layer_head_mask: tf.Tensor | None = None, training: Optional[bool] = False, ) -> Tuple[tf.Tensor, tf.Tensor | None]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, embed_dim = shape_list(hidden_states) # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = tf.concat([past_key_value[0], key_states], axis=2) value_states = tf.concat([past_key_value[1], value_states], axis=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. 
# Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) key_states = tf.reshape(key_states, proj_shape) value_states = tf.reshape(value_states, proj_shape) src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) tf.debugging.assert_equal( shape_list(attn_weights), [bsz * self.num_heads, tgt_len, src_len], message=( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {shape_list(attn_weights)}" ), ) if attention_mask is not None: tf.debugging.assert_equal( shape_list(attention_mask), [bsz, 1, tgt_len, src_len], message=( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" f" {shape_list(attention_mask)}" ), ) attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: tf.debugging.assert_equal( shape_list(layer_head_mask), [self.num_heads], message=( f"Head mask for a single layer should be of size {(self.num_heads)}, but is" f" {shape_list(layer_head_mask)}" ), ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) ) attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) tf.debugging.assert_equal( shape_list(attn_output), [bsz * self.num_heads, tgt_len, self.head_dim], message=( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {shape_list(attn_output)}" ), ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) ) attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) attn_output = self.out_proj(attn_output) attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) return attn_output, attn_weights, past_key_value class TFOPTDecoderLayer(tf.keras.layers.Layer): def __init__(self, config: OPTConfig, **kwargs): super().__init__(**kwargs) self.do_layer_norm_before = config.do_layer_norm_before self.embed_dim = config.hidden_size self.self_attn = TFOPTAttention( embed_dim=self.embed_dim, num_heads=config.num_attention_heads, dropout=config.attention_dropout, name="self_attn", is_decoder=True, ) self.dropout = tf.keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") self.fc1 = tf.keras.layers.Dense(config.ffn_dim, name="fc1") self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2") self.final_layer_norm = 
tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") def call( self, hidden_states: tf.Tensor, attention_mask: np.ndarray | tf.Tensor | None = None, layer_head_mask: tf.Tensor | None = None, past_key_value: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, training: Optional[bool] = False, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]: """ Args: hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`tf.Tensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`tf.Tensor`, *optional*): mask for attention heads in a given layer of size `(decoder_attention_heads,)` past_key_value (`Tuple(tf.Tensor)`, *optional*): cached past key and value projection states training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ residual = hidden_states # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention if self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states # 350m applies layer norm AFTER attention if not self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) # Fully Connected residual = hidden_states # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention if self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states # 350m applies layer norm AFTER attention if not self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) return (hidden_states, self_attn_weights, present_key_value) OPT_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. 
Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Args: config ([`OPTConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ @add_start_docstrings( "The bare OPT Model outputting raw hidden-states without any specific head on top.", OPT_START_DOCSTRING, ) class TFOPTPreTrainedModel(TFPreTrainedModel): """ TFOPT Pretrained Model that inheritates from transformers.TFPreTrainedModel Args: config: OPTConfig """ config_class = OPTConfig base_model_prefix = "model" OPT_INPUTS_DOCSTRING = r""" Args: input_ids (`tf.Tensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). Set to `False` during training, `True` during generation output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @keras_serializable class TFOPTDecoder(tf.keras.layers.Layer): config_class = OPTConfig def __init__(self, config: OPTConfig, **kwargs): super().__init__(**kwargs) self.config = config self.padding_idx = config.pad_token_id self.layerdrop = config.layerdrop num_embeddings = config.max_position_embeddings self.embed_tokens = TFSharedEmbeddings( config.vocab_size, config.word_embed_proj_dim, config.pad_token_id, name="embed_tokens" ) self.embed_positions = TFOPTLearnedPositionalEmbedding( num_embeddings, config.hidden_size, name="embed_positions", ) # Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility # with checkpoints that have been fine-tuned before transformers v4.20.1 # see https://github.com/facebookresearch/metaseq/pull/164 if config.do_layer_norm_before and not config._remove_final_layer_norm: self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") else: self.final_layer_norm = None if config.word_embed_proj_dim != config.hidden_size: self.project_out = tf.keras.layers.Dense(config.word_embed_proj_dim, name="project_out", use_bias=False) self.project_in = tf.keras.layers.Dense(config.hidden_size, name="project_in", use_bias=False) else: self.project_in = None self.project_out = None self.layers = [TFOPTDecoderLayer(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)] self.dropout = tf.keras.layers.Dropout(config.dropout) def get_embed_tokens(self): return self.embed_tokens def set_embed_tokens(self, embed_tokens): self.embed_tokens = embed_tokens def set_input_embeddings(self, new_embeddings): self.embed_tokens.vocab_size = new_embeddings.shape[0] self.embed_tokens.weight = new_embeddings def get_input_embeddings(self): return self.embed_tokens def _prepare_decoder_attention_mask(self, attention_mask, input_shape, past_key_values_length): # create causal mask # # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] _, seq_length = input_shape tf.debugging.assert_equal( seq_length + past_key_values_length, shape_list(attention_mask)[1], message="Attention mask shape should be (batch_size, seq_length + past_key_values_length)" f" but is {shape_list(attention_mask)[1]} with input_ids shape {input_shape} and past length" f" {past_key_values_length}.", ) expanded_attn_mask = _expand_mask(attention_mask, tgt_len=input_shape[-1]) if seq_length > 1: combined_attention_mask = ( _make_causal_mask(input_shape, past_key_values_length=past_key_values_length) + expanded_attn_mask ) else: combined_attention_mask = expanded_attn_mask return combined_attention_mask @unpack_inputs def call( self, input_ids: TFModelInputType | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, attention_mask: np.ndarray | tf.Tensor | None = 
None, head_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]: r""" Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0 if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.embed_tokens.vocab_size) inputs_embeds = self.embed_tokens(input_ids) if attention_mask is None: attention_mask = tf.ones((input_shape[0], input_shape[1] + past_key_values_length), dtype=tf.bool) else: tf.debugging.assert_equal( shape_list(attention_mask)[1], past_key_values_length + input_shape[1], message=( f"The provided attention mask has length {tf.shape(attention_mask)[1]}, but its length should be " f"{past_key_values_length + input_shape[1]} (sum of the lengths of current and past inputs)" ), ) pos_embeds = self.embed_positions(attention_mask, past_key_values_length) attention_mask = self._prepare_decoder_attention_mask(attention_mask, input_shape, past_key_values_length) if self.project_in is not None: inputs_embeds = self.project_in(inputs_embeds) hidden_states = inputs_embeds + pos_embeds # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None present_key_values = () if use_cache else None # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired for attn_mask_name, attn_mask in [("head_mask", head_mask)]: if attn_mask is not None: tf.debugging.assert_equal( shape_list(attn_mask)[0], len(self.layers), message=( f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for" f" {shape_list(attn_mask)[0]}." 
), ) for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) past_key_value = past_key_values[idx] if past_key_values is not None else None hidden_states, layer_self_attn, present_key_value = decoder_layer( hidden_states, attention_mask=attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, past_key_value=past_key_value, ) if use_cache: present_key_values += (present_key_value,) if output_attentions: all_self_attns += (layer_self_attn,) if self.final_layer_norm is not None: hidden_states = self.final_layer_norm(hidden_states) if self.project_out is not None: hidden_states = self.project_out(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, present_key_values, all_hidden_states, all_self_attns] if v is not None ) else: return TFBaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=present_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, ) @keras_serializable class TFOPTMainLayer(tf.keras.layers.Layer): config_class = OPTConfig def __init__(self, config: OPTConfig, **kwargs): super().__init__(**kwargs) self.config = config self.decoder = TFOPTDecoder(config, name="decoder") def get_input_embeddings(self): return self.decoder.embed_tokens def set_input_embeddings(self, new_embeddings): self.decoder.set_input_embeddings(new_embeddings) @unpack_inputs def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, **kwargs, ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.decoder( input_ids, attention_mask=attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) if not return_dict: return outputs return TFBaseModelOutputWithPast( last_hidden_state=outputs.last_hidden_state, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( "The bare TF OPT Model outputting raw hidden-states without any specific head on top.", OPT_START_DOCSTRING, ) @keras_serializable class TFOPTModel(TFOPTPreTrainedModel): config_class = OPTConfig def __init__(self, config: OPTConfig, **kwargs): super().__init__(config, **kwargs) self.config = config self.model = TFOPTMainLayer(config, name="model") def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, new_embeddings): self.model.set_input_embeddings(new_embeddings) @unpack_inputs 
@add_start_docstrings_to_model_forward(OPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC, expected_output=_EXPECTED_OUTPUT_SHAPE, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, **kwargs, ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model( input_ids, attention_mask=attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) if not return_dict: return outputs return TFBaseModelOutputWithPast( last_hidden_state=outputs.last_hidden_state, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFBaseModelOutputWithPast( last_hidden_state=output.last_hidden_state, past_key_values=pkv, hidden_states=hs, attentions=attns, ) @add_start_docstrings( """ The OPT Model transformer with a language modeling head on top. """, OPT_START_DOCSTRING, ) @keras_serializable class TFOPTForCausalLM(TFOPTPreTrainedModel, TFCausalLanguageModelingLoss): config_class = OPTConfig def __init__(self, config: OPTConfig, **kwargs): super().__init__(config, **kwargs) self.config = config self.model = TFOPTMainLayer(config, name="model") def get_output_embeddings(self): return self.model.get_input_embeddings() def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs): attention_mask = kwargs.get("attention_mask", None) # only last token for inputs_ids if past is defined in kwargs if past_key_values: inputs = tf.expand_dims(inputs[:, -1], -1) return { "input_ids": inputs, "attention_mask": attention_mask, "past_key_values": past_key_values, "use_cache": use_cache, } @unpack_inputs
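One non-obvious piece of the full file above is _make_causal_mask, which builds the additive causal mask with tf.linalg.band_part because a dynamic triu is awkward in graph mode. A small self-contained check of its behavior (the shape passed in is an illustrative value, not taken from the row):

import tensorflow as tf

LARGE_NEGATIVE = -1e8

def _make_causal_mask(input_ids_shape, past_key_values_length: int = 0):
    # Upper-triangular (strictly above the diagonal) entries get LARGE_NEGATIVE, everything else 0
    bsz, tgt_len = input_ids_shape[0], input_ids_shape[1]
    mask = tf.fill((tgt_len, tgt_len), tf.cast(LARGE_NEGATIVE, tf.float32))
    mask = tf.linalg.band_part(mask, 0, -1) - tf.linalg.band_part(mask, 0, 0)
    if past_key_values_length > 0:
        mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)
    return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))

m = _make_causal_mask((1, 3))
print(m.shape)                # (1, 1, 3, 3)
print((m[0, 0] < 0).numpy())  # True strictly above the diagonal: each position attends only to itself and the past
# [[False  True  True]
#  [False False  True]
#  [False False False]]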
next_line: @replace_return_docstrings(output_type=TFCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
gold_snippet_index: 2
created_at: 2023-11-07 04:23:57+00:00
level: 12k
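Reading this row end to end: all_code stops immediately before the line to be completed, and next_line holds that target line. A hypothetical way to score a completion against it; the whitespace-insensitive exact-match criterion below is an assumption for illustration, not something specified by the dataset:

next_line = "@replace_return_docstrings(output_type=TFCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)"
prediction = "    @replace_return_docstrings(output_type=TFCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)"
print(prediction.strip() == next_line.strip())  # True: matches after stripping indentation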
repo_name: HKU-BAL/ClairS-TO
file_path: src/extract_candidates_calling.py
[ { "identifier": "VcfReader", "path": "shared/vcf.py", "snippet": "class VcfReader(object):\n def __init__(self, vcf_fn,\n ctg_name=None,\n ctg_start=None,\n ctg_end=None,\n is_var_format=False,\n is_happy_format=False,\n is_fp=None,\n show_ref=True,\n direct_open=False,\n keep_row_str=False,\n skip_genotype=False,\n filter_tag=None,\n taf_filter=None,\n save_header=False,\n min_qual=None,\n max_qual=None,\n discard_indel=False,\n keep_af=False):\n self.vcf_fn = vcf_fn\n self.ctg_name = ctg_name\n self.ctg_start = ctg_start\n self.ctg_end = ctg_end\n self.variant_dict = defaultdict(Position)\n self.is_var_format = is_var_format\n self.is_happy_format = is_happy_format\n self.is_fp = is_fp\n self.show_ref = show_ref\n self.direct_open = direct_open\n self.keep_row_str = keep_row_str\n self.skip_genotype = skip_genotype\n self.filter_tag = filter_tag # PASS;HighConf PASS;MedConf in hcc1395\n self.taf_filter = taf_filter\n self.header = \"\"\n self.save_header = save_header\n self.discard_indel = discard_indel\n self.min_qual = min_qual\n self.max_qual = max_qual\n self.keep_af = keep_af\n\n def read_vcf(self):\n is_ctg_region_provided = self.ctg_start is not None and self.ctg_end is not None\n\n if self.vcf_fn is None or not os.path.exists(self.vcf_fn):\n return\n\n header_last_column = []\n if self.direct_open:\n vcf_fp = open(self.vcf_fn)\n vcf_fo = vcf_fp\n else:\n vcf_fp = subprocess_popen(shlex.split(\"gzip -fdc %s\" % (self.vcf_fn)))\n vcf_fo = vcf_fp.stdout\n for row in vcf_fo:\n columns = row.strip().split()\n if columns[0][0] == \"#\":\n if self.save_header:\n self.header += row\n header_last_column = columns\n continue\n\n tumor_in_last = True if len(header_last_column) and header_last_column[\n -1].rstrip().lower() == \"tumor\" else False\n # position in vcf is 1-based\n chromosome, position = columns[0], columns[1]\n if self.ctg_name is not None and chromosome != self.ctg_name:\n continue\n if is_ctg_region_provided and not (self.ctg_start <= int(position) <= self.ctg_end):\n continue\n\n FILTER = columns[6] if len(columns) >= 7 else None\n if self.filter_tag is not None:\n filter_list = self.filter_tag.split(',')\n if sum([1 if filter == FILTER else 0 for filter in filter_list]) == 0:\n continue\n self.is_var_format = True if columns[2][0] in 'ACGT' else False\n self.is_var_format = False\n if self.is_var_format:\n reference, alternate = columns[2], columns[3]\n genotype_1 = int(columns[4])\n genotype_2 = int(columns[5])\n else:\n reference, alternate, last_column = columns[3], columns[4], columns[-1]\n\n if self.discard_indel and (len(reference) > 1 or len(alternate) > 1):\n continue\n\n try:\n qual = columns[5] if len(columns) > 5 else None\n\n if self.min_qual is not None and float(qual) < self.min_qual:\n continue\n\n if self.max_qual is not None and float(qual) > self.max_qual:\n continue\n except:\n qual = None\n\n last_column = last_column if not tumor_in_last else columns[-2]\n if self.is_happy_format and self.is_fp:\n last_column = columns[10]\n if self.is_happy_format and not self.is_fp:\n last_column = columns[9]\n genotype = last_column.split(\":\")[0].replace(\"/\", \"|\").replace(\".\", \"0\").split(\"|\")\n try:\n genotype_1, genotype_2 = genotype\n\n if int(genotype_1) > int(genotype_2):\n genotype_1, genotype_2 = genotype_2, genotype_1\n\n # remove * to guarentee vcf match\n if '*' in alternate:\n alternate = alternate.split(',')\n if int(genotype_1) + int(genotype_2) != 3 or len(alternate) != 2:\n print('error with variant representation')\n continue\n 
alternate = ''.join([alt_base for alt_base in alternate if alt_base != '*'])\n # * always have a genotype 1/2\n\n genotype_1, genotype_2 = '0', '1'\n except:\n genotype_1 = -1\n genotype_2 = -1\n if self.keep_af:\n tag_list = columns[8].split(':')\n if 'AF' in tag_list or 'VAF' in tag_list:\n taf_index = tag_list.index('AF') if 'AF' in tag_list else tag_list.index('VAF')\n taf = float(columns[9].split(':')[taf_index])\n else:\n taf = None\n else:\n taf = None\n position = int(position)\n have_extra_infos = 'VT' in row\n\n if genotype_1 == \"0\" and genotype_2 == \"0\" and not self.show_ref and not self.skip_genotype:\n continue\n extra_infos = columns[-1].split(':')[-1] if have_extra_infos else ''\n row_str = row if self.keep_row_str else False\n key = (chromosome, position) if self.ctg_name is None else position\n\n self.variant_dict[key] = Position(ctg_name=chromosome,\n pos=position,\n ref_base=reference,\n alt_base=alternate,\n genotype1=int(genotype_1),\n genotype2=int(genotype_2),\n qual=qual,\n row_str=row_str,\n af=taf,\n filter=FILTER,\n extra_infos=extra_infos)\n\n def get_alt_info(self, pos, extra_info=\"\"):\n pos = int(pos)\n if pos not in self.variant_dict:\n return \"\"\n ref_base = self.variant_dict[pos].reference_bases\n alt_base = ','.join(self.variant_dict[pos].alternate_bases)\n gentoype_str = '/'.join([str(g) for g in self.variant_dict[pos].genotype])\n extra_info = self.variant_dict[pos].extra_infos if self.variant_dict[pos].extra_infos != \"\" else extra_info\n return extra_info + '_' + ref_base + '_' + alt_base + '_' + gentoype_str" }, { "identifier": "VcfWriter", "path": "shared/vcf.py", "snippet": "class VcfWriter(object):\n def __init__(self,\n vcf_fn,\n ctg_name=None,\n ref_fn=None,\n sample_name=\"SAMPLE\",\n write_header=True,\n header=None,\n cmdline=None,\n show_ref_calls=False):\n self.vcf_fn = vcf_fn\n self.show_ref_calls = show_ref_calls\n # make directory if not exist\n vcf_folder = os.path.dirname(self.vcf_fn)\n if not os.path.exists(vcf_folder):\n print(\"[INFO] Output VCF folder {} not found, create it\".format(vcf_folder))\n return_code = run(\"mkdir -p {}\".format(vcf_folder), shell=True)\n\n self.vcf_writer = open(self.vcf_fn, 'w')\n self.ref_fn = ref_fn\n self.ctg_name = ctg_name\n if ctg_name is not None:\n self.ctg_name_list = ctg_name.split(',') if ',' in ctg_name else [ctg_name]\n else:\n self.ctg_name_list = None\n self.sample_name = sample_name\n if write_header:\n self.write_header(ref_fn=ref_fn, header=header, cmdline=cmdline)\n\n def close(self):\n try:\n self.vcf_writer.close()\n except:\n pass\n\n def write_header(self, ctg_name=None, ref_fn=None, header=None, cmdline=None):\n header = vcf_header if header is None else header\n if cmdline is not None and cmdline != \"\":\n header_list = header.rstrip('\\n').split('\\n')\n insert_index = 3 if len(header_list) >= 3 else len(header_list) - 1\n header_list.insert(insert_index, \"##cmdline={}\".format(cmdline))\n header = \"\\n\".join(header_list) + '\\n'\n if self.ref_fn is not None:\n reference_index_file_path = file_path_from(self.ref_fn, suffix=\".fai\", exit_on_not_found=True, sep='.')\n with open(reference_index_file_path, \"r\") as fai_fp:\n for row in fai_fp:\n columns = row.strip().split(\"\\t\")\n contig_name, contig_size = columns[0], columns[1]\n if self.ctg_name_list is not None and contig_name not in self.ctg_name_list:\n continue\n header += \"##contig=<ID=%s,length=%s>\\n\" % (contig_name, contig_size)\n\n header += 
'#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\t%s\\n' % (self.sample_name)\n\n self.vcf_writer.write(header)\n\n def write_row(self,\n POS=None,\n REF=None,\n ALT=None,\n QUAL=0,\n GT='0/0',\n DP=0,\n AF=0,\n AD=None,\n CHROM=None,\n GQ=None,\n ID='.',\n FILTER=\".\",\n INFO='.',\n TAF=None,\n VT=None,\n TDP=None,\n AU=None,\n CU=None,\n GU=None,\n TU=None,\n row_str=None):\n if row_str is not None:\n self.vcf_writer.write(row_str)\n return\n GQ = GQ if GQ else QUAL\n CHROM = CHROM if CHROM else self.ctg_name\n if not self.show_ref_calls and (GT == \"0/0\" or GT == \"./.\"):\n return\n FORMAT = \"GT:GQ:DP:AF\"\n FORMAT_V = \"%s:%.4f:%d:%.4f\" % (GT, GQ, DP, AF)\n basic_vcf_format = \"%s\\t%d\\t%s\\t%s\\t%s\\t%.4f\\t%s\\t%s\" % (\n CHROM,\n int(POS),\n ID,\n REF,\n ALT,\n QUAL,\n FILTER,\n INFO\n )\n if AD is not None and AD != \"\":\n FORMAT += \":AD\"\n FORMAT_V += \":%s\" % (AD)\n if TAF is not None:\n FORMAT += \":TAF\"\n FORMAT_V += \":%.4f\" % (TAF)\n if TDP is not None:\n FORMAT += \":TDP\"\n FORMAT_V += \":%d\" % (TDP)\n if AU is not None and CU is not None and GU is not None and TU is not None:\n FORMAT += \":AU:CU:GU:TU\"\n FORMAT_V += \":%d:%d:%d:%d\" % (AU, CU, GU, TU)\n\n if VT is not None:\n FORMAT += \":VT\"\n FORMAT_V += \":%s\" % (VT)\n vcf_format = '\\t'.join([basic_vcf_format, FORMAT, FORMAT_V]) + \"\\n\"\n\n self.vcf_writer.write(vcf_format)" }, { "identifier": "subprocess_popen", "path": "shared/utils.py", "snippet": "def subprocess_popen(args, stdin=None, stdout=PIPE, stderr=stderr, bufsize=8388608):\n return Popen(args, stdin=stdin, stdout=stdout, stderr=stderr, bufsize=bufsize, universal_newlines=True)" }, { "identifier": "file_path_from", "path": "shared/utils.py", "snippet": "def file_path_from(file_name, suffix=\"\", exit_on_not_found=False, sep=\"\", allow_none=False, is_directory=False):\n if allow_none and file_name is None:\n return None\n if is_directory:\n is_folder_exists(file_name, suffix)\n if exit_on_not_found:\n exit(log_error(\"[ERROR] directory %s not found\" % (file_name + suffix)))\n if is_file_exists(file_name, suffix):\n return abspath(file_name + suffix)\n #allow fn.bam.bai->fn.bai fn.fa.fai->fn.fai\n elif sep != \"\" and len(sep) == 1:\n file_name_remove_suffix = sep.join(file_name.split(sep)[:-1])\n if is_file_exists(file_name_remove_suffix, suffix):\n return abspath(file_name_remove_suffix + suffix)\n if exit_on_not_found:\n exit(log_error(\"[ERROR] file %s not found\" % (file_name + suffix)))\n return None" }, { "identifier": "region_from", "path": "shared/utils.py", "snippet": "def region_from(ctg_name, ctg_start=None, ctg_end=None):\n \"\"\"\n 1-based region string [start, end]\n \"\"\"\n if ctg_name is None:\n return \"\"\n if (ctg_start is None) != (ctg_end is None):\n return \"\"\n\n if ctg_start is None and ctg_end is None:\n return \"{}\".format(ctg_name)\n return \"{}:{}-{}\".format(ctg_name, ctg_start, ctg_end)" }, { "identifier": "reference_sequence_from", "path": "shared/utils.py", "snippet": "def reference_sequence_from(samtools_execute_command, fasta_file_path, regions):\n refernce_sequences = []\n region_value_for_faidx = \" \".join(regions)\n\n samtools_faidx_process = subprocess_popen(\n shlex.split(\"{} faidx {} {}\".format(samtools_execute_command, fasta_file_path, region_value_for_faidx))\n )\n while True:\n row = samtools_faidx_process.stdout.readline()\n is_finish_reading_output = row == '' and samtools_faidx_process.poll() is not None\n if is_finish_reading_output:\n break\n if row:\n 
refernce_sequences.append(row.rstrip())\n\n # first line is reference name \">xxxx\", need to be ignored\n reference_sequence = \"\".join(refernce_sequences[1:])\n\n # uppercase for masked sequences\n reference_sequence = reference_sequence.upper()\n\n samtools_faidx_process.stdout.close()\n samtools_faidx_process.wait()\n if samtools_faidx_process.returncode != 0:\n return None\n\n return reference_sequence" }, { "identifier": "str2bool", "path": "shared/utils.py", "snippet": "def str2bool(v):\n if v is None:\n return v\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'ture', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'flase', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')" }, { "identifier": "str_none", "path": "shared/utils.py", "snippet": "def str_none(v):\n if v is None:\n return None\n if v.upper() == \"NONE\":\n return None\n if isinstance(v, str):\n return v" }, { "identifier": "bed_tree_from", "path": "shared/interval_tree.py", "snippet": "def bed_tree_from(bed_file_path,\n expand_region=None,\n contig_name=None,\n bed_ctg_start=None,\n bed_ctg_end=None,\n return_bed_region=False,\n padding=None,\n region=None):\n \"\"\"\n 0-based interval tree [start, end)\n \"\"\"\n\n tree = {}\n if region is not None:\n try:\n ctg_name, start_end = region.split(':')\n ctg_start, ctg_end = int(start_end.split('-')[0]) - 1, int(start_end.split('-')[1]) - 1 # bed format\n except:\n sys.exit(\"[ERROR] Please input the correct format for --region ctg_name:start-end, your input is {}\".format(region))\n if ctg_end < ctg_start or ctg_start < 0 or ctg_end < 0:\n sys.exit(\"[ERROR] Invalid region input: {}\".format(region))\n\n if ctg_name not in tree:\n tree[ctg_name] = IntervalTree()\n tree[ctg_name].addi(ctg_start, ctg_end)\n if return_bed_region:\n return tree, None, None\n return tree\n\n if bed_file_path is None or bed_file_path == \"\":\n if return_bed_region:\n return tree, None, None\n return tree\n\n bed_start, bed_end = float('inf'), 0\n unzip_process = subprocess_popen(shlex.split(\"gzip -fdc %s\" % (bed_file_path)))\n for row_id, row in enumerate(unzip_process.stdout):\n if row[0] == '#':\n continue\n columns = row.strip().split()\n\n ctg_name = columns[0]\n if contig_name != None and ctg_name != contig_name:\n continue\n if ctg_name not in tree:\n tree[ctg_name] = IntervalTree()\n\n ctg_start, ctg_end = int(columns[1]), int(columns[2])\n\n if ctg_end < ctg_start or ctg_start < 0 or ctg_end < 0:\n sys.exit(\"[ERROR] Invalid bed input in {}-th row {} {} {}\".format(row_id+1, ctg_name, ctg_start, ctg_end))\n\n if bed_ctg_start and bed_ctg_end:\n if ctg_end < bed_ctg_start or ctg_start > bed_ctg_end:\n continue\n if padding:\n ctg_start += padding\n ctg_end -= padding\n bed_start = min(ctg_start, bed_start)\n bed_end = max(ctg_end, bed_end)\n if ctg_start == ctg_end:\n ctg_end += 1\n\n tree[ctg_name].addi(ctg_start, ctg_end)\n\n unzip_process.stdout.close()\n unzip_process.wait()\n if return_bed_region:\n return tree, bed_start, bed_end\n return tree" }, { "identifier": "is_region_in", "path": "shared/interval_tree.py", "snippet": "def is_region_in(tree, contig_name, region_start=None, region_end=None):\n if not tree or (contig_name is None) or (contig_name not in tree):\n return False\n\n interval_tree = tree[contig_name]\n return len(\n interval_tree.at(region_start)\n if region_end is None else\n interval_tree.overlap(begin=region_start, end=region_end)\n ) > 0" } ]
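Among the smaller helpers bundled in the context above, region_from just formats a 1-based samtools region string. Copied as-is from the snippet, with invented arguments for illustration:

def region_from(ctg_name, ctg_start=None, ctg_end=None):
    """1-based region string [start, end]."""
    if ctg_name is None:
        return ""
    if (ctg_start is None) != (ctg_end is None):
        return ""
    if ctg_start is None and ctg_end is None:
        return "{}".format(ctg_name)
    return "{}:{}-{}".format(ctg_name, ctg_start, ctg_end)

print(region_from("chr20"))            # chr20
print(region_from("chr20", 100, 200))  # chr20:100-200
print(region_from("chr20", 100))       # "" (a start without an end is rejected)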
import sys import shlex import os import logging import subprocess import shared.param as param from argparse import ArgumentParser, SUPPRESS from collections import Counter, defaultdict from shared.vcf import VcfReader, VcfWriter from shared.utils import subprocess_popen, file_path_from, region_from, \ reference_sequence_from, str2bool, str_none from shared.interval_tree import bed_tree_from, is_region_in
token_num: 8,269
DP=10, AF=float(tumor_af)) vcf_writer.close() if select_indel_candidates: print("[INFO] {} chunk {}/{}: Total snv candidates found: {}, total indel candidates found: {}".format(ctg_name, \ chunk_id, chunk_num, len(snv_candidates_list), len(indel_candidates_list))) else: print("[INFO] {} chunk {}/{}: Total candidates found: {}".format(ctg_name, chunk_id, chunk_num, len(snv_candidates_list))) if candidates_folder is not None and len(snv_candidates_list): all_candidates_regions = [] region_num = len(snv_candidates_list) // split_bed_size + 1 if len( snv_candidates_list) % split_bed_size else len(snv_candidates_list) // split_bed_size for idx in range(region_num): # a windows region for create tensor # samtools mpileup not include last position split_output = snv_candidates_list[idx * split_bed_size: (idx + 1) * split_bed_size] output_path = os.path.join(candidates_folder, '{}.{}_{}_{}'.format(ctg_name, chunk_id, idx, region_num)) all_candidates_regions.append(output_path) with open(output_path, 'w') as output_file: output_file.write('\n'.join( ['\t'.join([ctg_name, str(x - flankingBaseNum - 1), str(x + flankingBaseNum + 1)]) for x in split_output]) + '\n') # bed format all_candidates_regions_path = os.path.join(candidates_folder, 'CANDIDATES_FILE_{}_{}'.format(ctg_name, chunk_id)) with open(all_candidates_regions_path, 'w') as output_file: output_file.write('\n'.join(all_candidates_regions) + '\n') if select_indel_candidates and candidates_folder is not None and len(indel_candidates_list): all_candidates_regions = [] region_num = len(indel_candidates_list) // split_bed_size + 1 if len( indel_candidates_list) % split_bed_size else len(indel_candidates_list) // split_bed_size for idx in range(region_num): # a windows region for create tensor # samtools mpileup not include last position split_output = indel_candidates_list[idx * split_bed_size: (idx + 1) * split_bed_size] output_path = os.path.join(candidates_folder, '{}.{}_{}_{}_indel'.format(ctg_name, chunk_id, idx, region_num)) all_candidates_regions.append(output_path) with open(output_path, 'w') as output_file: output_file.write('\n'.join( ['\t'.join([ctg_name, str(x - flankingBaseNum - 1), str(x + flankingBaseNum + 1)]) for x in split_output]) + '\n') # bed format all_candidates_regions_path = os.path.join(candidates_folder, 'INDEL_CANDIDATES_FILE_{}_{}'.format(ctg_name, chunk_id)) with open(all_candidates_regions_path, 'w') as output_file: output_file.write('\n'.join(all_candidates_regions) + '\n') if hybrid_mode_vcf_fn is not None or genotyping_mode_vcf_fn is not None: hybrid_output_path = os.path.join(candidates_folder, '{}.{}_hybrid_info'.format(ctg_name, chunk_id)) with open(hybrid_output_path, 'w') as output_file: for k, v in hybrid_info_dict.items(): output_info = '\t'.join([ctg_name, str(k), v.ref_base, v.tumor_alt_info]) output_file.write(output_info + '\n') samtools_mpileup_process.stdout.close() samtools_mpileup_process.wait() if alt_fn: alt_fp.close() def main(): parser = ArgumentParser(description="Generate tumor variant candidates for tensor creation in calling") parser.add_argument('--platform', type=str, default='ont', help="Select the sequencing platform of the input. 
Default: %(default)s") parser.add_argument('--candidates_folder', type=str, default=None, help="Output candidate folder to store the candidate bed information, required") parser.add_argument('--tumor_bam_fn', type=str, default=None, help="Sorted tumor BAM file input, required") parser.add_argument('--ref_fn', type=str, default=None, help="Reference fasta file input, required") parser.add_argument('--snv_min_af', type=float, default=param.snv_min_af, help="Minimum SNV allele frequency in the tumor sample for a site to be considered as a candidate site in tumor sample, default: %(default)f") parser.add_argument('--ctg_name', type=str, default=None, help="The name of sequence to be processed, required if --bed_fn is not defined") parser.add_argument('--ctg_start', type=int, default=None, help="The 1-based starting position of the sequence to be processed, optional, will process the whole --ctg_name if not set") parser.add_argument('--ctg_end', type=int, default=None, help="The 1-based inclusive ending position of the sequence to be processed, optional, will process the whole --ctg_name if not set") parser.add_argument('--bed_fn', type=str, default=None, help="Call variant only in the provided regions. Will take an intersection if --ctg_name and/or (--ctg_start, --ctg_end) are set") parser.add_argument('--samtools', type=str, default="samtools", help="Path to the 'samtools', samtools version >= 1.10 is required. default: %(default)s") # options for advanced users parser.add_argument('--min_coverage', type=float, default=param.min_coverage, help="EXPERIMENTAL: Minimum coverage required in both normal and tumor sample to call a variant, default: %(default)f") parser.add_argument('--min_mq', type=int, default=param.min_mq, help="EXPERIMENTAL: If set, reads with mapping quality with <$min_mq are filtered, default: %(default)d") parser.add_argument('--min_bq', type=int, default=None, help="EXPERIMENTAL: If set, bases with base quality with <$min_bq are filtered, default: %(default)d") parser.add_argument('--max_depth', type=int, default=None, help="EXPERIMENTAL: Maximum depth to be processed. default: %(default)s") parser.add_argument('--alternative_base_num', type=int, default=param.alternative_base_num, help="EXPERIMENTAL: Minimum alternative base number to process a candidate. default: %(default)s")
# BSD 3-Clause License # # Copyright 2023 The University of Hong Kong, Department of Computer Science # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. logging.basicConfig(format='%(message)s', level=logging.INFO) class AltInfo(object): def __init__(self, ref_base='', tumor_alt_info=""): self.ref_base = ref_base self.tumor_alt_info = tumor_alt_info def decode_pileup_bases(pileup_bases, reference_base, min_coverage, minimum_snv_af_for_candidate, minimum_indel_af_for_candidate, alternative_base_num, has_pileup_candidates, read_name_list, is_tumor, select_indel_candidates=False, platform="ont"): """ Decode mpileup input string. pileup_bases: pileup base string for each position, include all mapping information. reference_base: upper reference base for cigar calculation. pileup_dict: dictionary (pos: pos info) which keep read information that cover specific position. ref_seq: chunked reference sequence in window, start: center pos - flankingBaseNum, end: center + flankingBaseNum + 1. reference_sequence: reference sequence index by contig:start-end. 0-based. minimum_af_for_candidate: default minimum alleic frequency for candidate filtering, filter if below specific thredshold. has_pileup_candidates: if the candidate is directly obtained from pileup output, then no need to check the af filtering. 
""" base_idx = 0 base_list = [] while base_idx < len(pileup_bases): base = pileup_bases[base_idx] if base == '+' or base == '-': base_idx += 1 advance = 0 while True: num = pileup_bases[base_idx] if num.isdigit(): advance = advance * 10 + int(num) base_idx += 1 else: break base_list[-1][1] = base + pileup_bases[base_idx: base_idx + advance] # add indel seq base_idx += advance - 1 elif base in "ACGTNacgtn#*": base_list.append([base, ""]) elif base == '^': # start of read, next base is mq, update mq info base_idx += 1 # skip $, the end of read base_idx += 1 pileup_dict = defaultdict(int) base_counter = Counter([''.join(item) for item in base_list]) alt_dict = dict(Counter([''.join(item).upper() for item in base_list])) tumor_alt_dict = dict(Counter([''.join(item).upper() for item, read_name in zip(base_list, read_name_list) if read_name.startswith('t')])) if is_tumor else None depth = 0 for key, count in base_counter.items(): if key[0].upper() in 'ACGT': pileup_dict[key[0].upper()] += count depth += count elif key[0] in "#*": depth += count if len(key) > 1 and key[1] == '+': if select_indel_candidates: pileup_dict['I' + key[0].upper() + key[2:].upper()] += count else: pileup_dict['I'] += count elif len(key) > 1 and key[1] == '-': if select_indel_candidates: pileup_dict['D' + len(key[2:]) * "N"] += count else: pileup_dict['D'] += count denominator = depth if depth > 0 else 1 pileup_list = sorted(list(pileup_dict.items()), key=lambda x: x[1], reverse=True) pass_snv_af = False pass_indel_af = False pass_depth = depth > min_coverage for item, count in pileup_list: if item == reference_base: continue elif item[0] in 'ID': if select_indel_candidates: pass_indel_af = (pass_indel_af or (float(count) / denominator >= minimum_indel_af_for_candidate and ( alternative_base_num is not None and count >= alternative_base_num))) continue pass_snv_af = pass_snv_af or (float(count) / denominator >= minimum_snv_af_for_candidate) and ( alternative_base_num is not None and count >= alternative_base_num) af = (float(pileup_list[1][1]) / denominator) if len(pileup_list) > 1 else 0.0 af = (float(pileup_list[0][1]) / denominator) if len(pileup_list) >= 1 and pileup_list[0][ 0] != reference_base else af pass_af = (pass_snv_af or pass_indel_af) and pass_depth alt_list = sorted(list(alt_dict.items()), key=lambda x: x[1], reverse=True) alt_list = [[item[0], str(round(item[1] / denominator, 3))] for item in alt_list if item[0].upper() != reference_base] if not pass_af: return base_list, depth, pass_af, af, "", "", "", alt_list, pass_snv_af, pass_indel_af, pileup_list pileup_list = [[item[0], str(round(item[1] / denominator, 3))] for item in pileup_list] af_infos = ','.join([item[1] for item in pileup_list if item[0] != reference_base]) pileup_infos = ' '.join([item[0] + ':' + item[1] for item in alt_list]) if tumor_alt_dict is not None: tumor_alt_list = sorted(list(tumor_alt_dict.items()), key=lambda x: x[1], reverse=True) tumor_alt_list = [[item[0], str(round(item[1] / denominator, 3))] for item in tumor_alt_list] tumor_pileup_infos = ' '.join([item[0] + ':' + item[1] for item in tumor_alt_list]) else: tumor_pileup_infos = "" return base_list, depth, pass_af, af, af_infos, pileup_infos, tumor_pileup_infos, alt_list, pass_snv_af, pass_indel_af, pileup_list def extract_pair_candidates(args): ctg_start = args.ctg_start ctg_end = args.ctg_end fasta_file_path = args.ref_fn ctg_name = args.ctg_name samtools_execute_command = args.samtools output_depth = args.output_depth output_alt_info = args.output_alt_info 
tumor_bam_file_path = args.tumor_bam_fn chunk_id = args.chunk_id - 1 if args.chunk_id else None # 1-base to 0-base chunk_num = args.chunk_num minimum_snv_af_for_candidate = args.snv_min_af minimum_indel_af_for_candidate = args.indel_min_af minimum_snv_af_for_truth = args.min_truth_snv_af minimum_indel_af_for_truth = args.min_truth_snv_af alternative_base_num = args.alternative_base_num split_bed_size = param.split_bed_size candidates_folder = args.candidates_folder min_coverage = args.min_coverage platform = args.platform store_tumor_infos = args.store_tumor_infos alt_fn = args.alt_fn confident_bed_fn = file_path_from(args.bed_fn, allow_none=True, exit_on_not_found=False) is_confident_bed_file_given = confident_bed_fn is not None min_mapping_quality = args.min_mq min_base_quality = args.min_bq flankingBaseNum = param.flankingBaseNum if args.flanking is None else args.flanking no_of_positions = 2 * flankingBaseNum + 1 genotyping_mode_vcf_fn = args.genotyping_mode_vcf_fn hybrid_mode_vcf_fn = args.hybrid_mode_vcf_fn truth_vcf_fn = args.truth_vcf_fn is_truth_vcf_provided = truth_vcf_fn is not None select_indel_candidates = args.select_indel_candidates candidates_set = set() indel_candidates_list = [] snv_candidates_set = set() indel_candidates_set = set() truths_variant_dict = {} if is_truth_vcf_provided: unified_vcf_reader = VcfReader(vcf_fn=truth_vcf_fn, ctg_name=ctg_name, is_var_format=False) unified_vcf_reader.read_vcf() truths_variant_dict = unified_vcf_reader.variant_dict candidates_pos_set = set() add_read_regions = True hybrid_candidate_set = set() indel_hybrid_candidate_set = set() if hybrid_mode_vcf_fn is not None or genotyping_mode_vcf_fn is not None: vcf_fn = hybrid_mode_vcf_fn if hybrid_mode_vcf_fn is not None else genotyping_mode_vcf_fn vcf_reader = VcfReader(vcf_fn=vcf_fn, ctg_name=ctg_name, is_var_format=False) vcf_reader.read_vcf() hybrid_variant_dict = vcf_reader.variant_dict for k, v in hybrid_variant_dict.items(): ref_base, alt_base = v.reference_bases, v.alternate_bases[0] if len(ref_base) > 1 or len(alt_base) > 1: if select_indel_candidates: indel_hybrid_candidate_set.add(k) candidates_set.add(k) hybrid_candidate_set.add(k) hybrid_info_dict = defaultdict(AltInfo) fai_fn = file_path_from(fasta_file_path, suffix=".fai", exit_on_not_found=True, sep='.') if chunk_id is not None: """ Whole genome calling option, acquire contig start end position from reference fasta index(.fai), then split the reference accroding to chunk id and total chunk numbers. 
""" if is_confident_bed_file_given: # consistent with pileup generation, faster to extract tensor using bed region tree, bed_start, bed_end = bed_tree_from(bed_file_path=confident_bed_fn, contig_name=ctg_name, return_bed_region=True) chunk_size = (bed_end - bed_start) // chunk_num + 1 if (bed_end - bed_start) % chunk_num else ( bed_end - bed_start) // chunk_num ctg_start = bed_start + 1 + chunk_size * chunk_id # 0-base to 1-base ctg_end = ctg_start + chunk_size else: contig_length = 0 with open(fai_fn, 'r') as fai_fp: for row in fai_fp: columns = row.strip().split("\t") contig_name = columns[0] if contig_name != ctg_name: continue contig_length = int(columns[1]) chunk_size = contig_length // chunk_num + 1 if contig_length % chunk_num else contig_length // chunk_num ctg_start = chunk_size * chunk_id # 0-base to 1-base ctg_end = ctg_start + chunk_size candidates_pos_set = set([item for item in candidates_pos_set if item >= ctg_start and item <= ctg_end]) # 1-based regions [start, end] (start and end inclusive) ref_regions = [] reads_regions = [] is_ctg_name_given = ctg_name is not None is_ctg_range_given = is_ctg_name_given and ctg_start is not None and ctg_end is not None if is_ctg_range_given: extend_start = max(ctg_start - ( no_of_positions), 1) extend_end = ctg_end + no_of_positions reads_regions.append(region_from(ctg_name=ctg_name, ctg_start=extend_start, ctg_end=extend_end)) reference_start, reference_end = ctg_start - param.expandReferenceRegion, ctg_end + param.expandReferenceRegion reference_start = 1 if reference_start < 1 else reference_start ref_regions.append(region_from(ctg_name=ctg_name, ctg_start=reference_start, ctg_end=reference_end)) elif is_ctg_name_given: reads_regions.append(region_from(ctg_name=ctg_name)) ref_regions.append(region_from(ctg_name=ctg_name)) reference_start = 1 reference_sequence = reference_sequence_from( samtools_execute_command=samtools_execute_command, fasta_file_path=fasta_file_path, regions=ref_regions ) if reference_sequence is None or len(reference_sequence) == 0: sys.exit("[ERROR] Failed to load reference sequence from file ({}).".format(fasta_file_path)) mq_option = ' --min-MQ {}'.format(min_mapping_quality) bq_option = ' --min-BQ {}'.format(min_base_quality) read_name_option = ' --output-QNAME' if store_tumor_infos else ' ' bed_option = ' -l {}'.format( confident_bed_fn) if is_confident_bed_file_given else "" flags_option = ' --excl-flags {} '.format(param.SAMTOOLS_VIEW_FILTER_FLAG) max_depth_option = ' --max-depth {} '.format(args.max_depth) if args.max_depth is not None else " " reads_regions_option = ' -r {}'.format(" ".join(reads_regions)) if add_read_regions else "" stdin = None if tumor_bam_file_path != "PIPE" else sys.stdin tumor_bam_file_path = tumor_bam_file_path if tumor_bam_file_path != "PIPE" else "-" samtools_command = samtools_execute_command + " mpileup --reverse-del" + read_name_option + reads_regions_option + \ mq_option + bq_option + bed_option + flags_option + max_depth_option samtools_mpileup_process = subprocess_popen( shlex.split(samtools_command + ' ' + tumor_bam_file_path), stdin=stdin, stderr=subprocess.PIPE) if alt_fn: output_alt_fn = alt_fn alt_fp = open(output_alt_fn, 'w') is_tumor = alt_fn.split('/')[-2].startswith('tumor') if alt_fn else False has_pileup_candidates = len(candidates_pos_set) candidates_dict = defaultdict(str) for row in samtools_mpileup_process.stdout: # chr position N depth seq BQ read_name mapping_quality phasing_info columns = row.strip().split('\t') pos = int(columns[1]) pileup_bases = 
columns[4] read_name_list = columns[6].split(',') if store_tumor_infos else [] reference_base = reference_sequence[pos - reference_start].upper() if reference_base.upper() not in "ACGT": continue is_truth_candidate = pos in truths_variant_dict minimum_snv_af_for_candidate = minimum_snv_af_for_truth if is_truth_candidate and minimum_snv_af_for_truth else minimum_snv_af_for_candidate minimum_indel_af_for_candidate = minimum_indel_af_for_truth if is_truth_candidate and minimum_indel_af_for_truth else minimum_indel_af_for_candidate base_list, depth, pass_af, af, af_infos, pileup_infos, tumor_pileup_infos, alt_list, pass_snv_af, pass_indel_af, pileup_list = decode_pileup_bases( pileup_bases=pileup_bases, reference_base=reference_base, min_coverage=min_coverage, minimum_snv_af_for_candidate=minimum_snv_af_for_candidate, minimum_indel_af_for_candidate=minimum_indel_af_for_candidate, alternative_base_num=alternative_base_num, has_pileup_candidates=has_pileup_candidates, read_name_list=read_name_list, is_tumor=is_tumor, select_indel_candidates=select_indel_candidates ) if pos in hybrid_candidate_set: tumor_alt_info = str(depth) + '-' + ' '.join([' '.join([item[0], str(item[1])]) for item in pileup_list]) hybrid_info_dict[pos] = AltInfo(ref_base=reference_base, tumor_alt_info=tumor_alt_info) if pass_af and alt_fn: depth_list = [str(depth)] if output_depth else [] alt_info_list = [af_infos, pileup_infos, tumor_pileup_infos] if output_alt_info else [] alt_fp.write('\t'.join([ctg_name, str(pos), reference_base] + depth_list + alt_info_list) + '\n') if pass_af: candidates_set.add(pos) candidates_dict[pos] = (alt_list, depth) if pass_snv_af: snv_candidates_set.add(pos) tumor_info = [item for item in alt_list if item[0] in "ACGT"] if len(tumor_info) == 0: snv_candidates_set.remove(pos) if select_indel_candidates and pass_indel_af: indel_candidates_set.add(pos) tumor_info = [item for item in alt_list if '+' in item[0] or '-' in item[0]] if len(tumor_info) == 0: indel_candidates_set.remove(pos) if not pass_af and (pos in hybrid_candidate_set): candidates_set.add(pos) snv_candidates_set.add(pos) tumor_info = [item for item in alt_list if item[0] in "ACGT"] if len(tumor_info) == 0: snv_candidates_set.remove(pos) if select_indel_candidates: indel_candidates_set.add(pos) tumor_info = [item for item in alt_list if '+' in item[0] or '-' in item[0]] if len(tumor_info) == 0: indel_candidates_set.remove(pos) bed_path = os.path.join(candidates_folder, "bed", '{}_{}.bed'.format(ctg_name, chunk_id)) if not os.path.exists(os.path.join(candidates_folder, 'bed')): output = subprocess.run("mkdir -p {}".format(os.path.join(candidates_folder, 'bed')), shell=True) output_bed = open(bed_path, 'w') for pos in sorted(list(candidates_set)): output_bed.write('\t'.join([ctg_name, str(pos - 1), str(pos)]) + '\n') output_bed.close() snv_candidates_list = sorted([pos for pos in candidates_set if pos in snv_candidates_set]) if select_indel_candidates: indel_candidates_list = sorted([pos for pos in candidates_set if pos in indel_candidates_set]) gen_vcf = False if gen_vcf: truth_vcf_fn = args.truth_vcf_fn truth_vcf_reader = VcfReader(vcf_fn=truth_vcf_fn, ctg_name=ctg_name, show_ref=False, keep_row_str=True, skip_genotype=True) truth_vcf_reader.read_vcf() truth_variant_dict = truth_vcf_reader.variant_dict vcf_writer = VcfWriter(vcf_fn=os.path.join(candidates_folder, "{}_{}.vcf".format(ctg_name, chunk_id)), ref_fn=fasta_file_path, ctg_name=ctg_name, show_ref_calls=True) for pos in snv_candidates_list: genotype = '1/1' ref_base, alt_base = 
"A", "A" if pos in truth_variant_dict: print(ctg_name, pos, "in truth set") continue tumor_alt_list, tumor_depth = candidates_dict[pos] tumor_info = [item for item in tumor_alt_list if item[0] in "ACGT"] if len(tumor_info) == 0: # candidates_set.remove(pos) print(pos, "gen vcf not found tumor") continue alt_base, tumor_af = tumor_info[0] ref_base = reference_sequence[pos - reference_start].upper() vcf_writer.write_row(POS=pos, REF=ref_base, ALT=alt_base, QUAL=10, GT=genotype, DP=10, AF=float(tumor_af)) vcf_writer.close() if select_indel_candidates: print("[INFO] {} chunk {}/{}: Total snv candidates found: {}, total indel candidates found: {}".format(ctg_name, \ chunk_id, chunk_num, len(snv_candidates_list), len(indel_candidates_list))) else: print("[INFO] {} chunk {}/{}: Total candidates found: {}".format(ctg_name, chunk_id, chunk_num, len(snv_candidates_list))) if candidates_folder is not None and len(snv_candidates_list): all_candidates_regions = [] region_num = len(snv_candidates_list) // split_bed_size + 1 if len( snv_candidates_list) % split_bed_size else len(snv_candidates_list) // split_bed_size for idx in range(region_num): # a windows region for create tensor # samtools mpileup not include last position split_output = snv_candidates_list[idx * split_bed_size: (idx + 1) * split_bed_size] output_path = os.path.join(candidates_folder, '{}.{}_{}_{}'.format(ctg_name, chunk_id, idx, region_num)) all_candidates_regions.append(output_path) with open(output_path, 'w') as output_file: output_file.write('\n'.join( ['\t'.join([ctg_name, str(x - flankingBaseNum - 1), str(x + flankingBaseNum + 1)]) for x in split_output]) + '\n') # bed format all_candidates_regions_path = os.path.join(candidates_folder, 'CANDIDATES_FILE_{}_{}'.format(ctg_name, chunk_id)) with open(all_candidates_regions_path, 'w') as output_file: output_file.write('\n'.join(all_candidates_regions) + '\n') if select_indel_candidates and candidates_folder is not None and len(indel_candidates_list): all_candidates_regions = [] region_num = len(indel_candidates_list) // split_bed_size + 1 if len( indel_candidates_list) % split_bed_size else len(indel_candidates_list) // split_bed_size for idx in range(region_num): # a windows region for create tensor # samtools mpileup not include last position split_output = indel_candidates_list[idx * split_bed_size: (idx + 1) * split_bed_size] output_path = os.path.join(candidates_folder, '{}.{}_{}_{}_indel'.format(ctg_name, chunk_id, idx, region_num)) all_candidates_regions.append(output_path) with open(output_path, 'w') as output_file: output_file.write('\n'.join( ['\t'.join([ctg_name, str(x - flankingBaseNum - 1), str(x + flankingBaseNum + 1)]) for x in split_output]) + '\n') # bed format all_candidates_regions_path = os.path.join(candidates_folder, 'INDEL_CANDIDATES_FILE_{}_{}'.format(ctg_name, chunk_id)) with open(all_candidates_regions_path, 'w') as output_file: output_file.write('\n'.join(all_candidates_regions) + '\n') if hybrid_mode_vcf_fn is not None or genotyping_mode_vcf_fn is not None: hybrid_output_path = os.path.join(candidates_folder, '{}.{}_hybrid_info'.format(ctg_name, chunk_id)) with open(hybrid_output_path, 'w') as output_file: for k, v in hybrid_info_dict.items(): output_info = '\t'.join([ctg_name, str(k), v.ref_base, v.tumor_alt_info]) output_file.write(output_info + '\n') samtools_mpileup_process.stdout.close() samtools_mpileup_process.wait() if alt_fn: alt_fp.close() def main(): parser = ArgumentParser(description="Generate tumor variant candidates for tensor creation in 
calling") parser.add_argument('--platform', type=str, default='ont', help="Select the sequencing platform of the input. Default: %(default)s") parser.add_argument('--candidates_folder', type=str, default=None, help="Output candidate folder to store the candidate bed information, required") parser.add_argument('--tumor_bam_fn', type=str, default=None, help="Sorted tumor BAM file input, required") parser.add_argument('--ref_fn', type=str, default=None, help="Reference fasta file input, required") parser.add_argument('--snv_min_af', type=float, default=param.snv_min_af, help="Minimum SNV allele frequency in the tumor sample for a site to be considered as a candidate site in tumor sample, default: %(default)f") parser.add_argument('--ctg_name', type=str, default=None, help="The name of sequence to be processed, required if --bed_fn is not defined") parser.add_argument('--ctg_start', type=int, default=None, help="The 1-based starting position of the sequence to be processed, optional, will process the whole --ctg_name if not set") parser.add_argument('--ctg_end', type=int, default=None, help="The 1-based inclusive ending position of the sequence to be processed, optional, will process the whole --ctg_name if not set") parser.add_argument('--bed_fn', type=str, default=None, help="Call variant only in the provided regions. Will take an intersection if --ctg_name and/or (--ctg_start, --ctg_end) are set") parser.add_argument('--samtools', type=str, default="samtools", help="Path to the 'samtools', samtools version >= 1.10 is required. default: %(default)s") # options for advanced users parser.add_argument('--min_coverage', type=float, default=param.min_coverage, help="EXPERIMENTAL: Minimum coverage required in both normal and tumor sample to call a variant, default: %(default)f") parser.add_argument('--min_mq', type=int, default=param.min_mq, help="EXPERIMENTAL: If set, reads with mapping quality with <$min_mq are filtered, default: %(default)d") parser.add_argument('--min_bq', type=int, default=None, help="EXPERIMENTAL: If set, bases with base quality with <$min_bq are filtered, default: %(default)d") parser.add_argument('--max_depth', type=int, default=None, help="EXPERIMENTAL: Maximum depth to be processed. default: %(default)s") parser.add_argument('--alternative_base_num', type=int, default=param.alternative_base_num, help="EXPERIMENTAL: Minimum alternative base number to process a candidate. default: %(default)s")
parser.add_argument('--select_indel_candidates', type=str2bool, default=0,
6
2023-11-07 04:39:16+00:00
12k
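The `decode_pileup_bases` routine documented in the record above walks one samtools-mpileup base column, counts alleles, and applies allele-frequency and depth thresholds to nominate candidates. The snippet below is a rough standalone sketch of that parsing loop only, not the repository's implementation: indel sequences, strand/read-name tracking, and deletion placeholders are stripped out, the helper names `count_snv_alleles`/`is_snv_candidate` are invented for illustration, and the 0.05 AF / depth-4 thresholds are placeholders rather than the record's defaults.

```python
from collections import Counter

def count_snv_alleles(pileup_bases: str) -> Counter:
    """Count substitution alleles in one mpileup base column.

    Assumes mpileup was run without a reference fasta (as in the record above),
    so read bases appear as literal A/C/G/T rather than '.'/',' match symbols.
    """
    counts = Counter()
    i = 0
    while i < len(pileup_bases):
        base = pileup_bases[i]
        if base in '+-':
            # indel marker "<+|-><length><sequence>" is attached to the previous base;
            # parse the length, then jump over the inserted/deleted sequence
            i += 1
            length = 0
            while pileup_bases[i].isdigit():
                length = length * 10 + int(pileup_bases[i])
                i += 1
            i += length - 1
        elif base == '^':
            i += 1  # read start: the next character encodes mapping quality, skip it
        elif base.upper() in 'ACGTN':
            counts[base.upper()] += 1
        # '$' (read end) and '*'/'#' (deletion placeholders) are ignored in this sketch
        i += 1
    return counts

def is_snv_candidate(pileup_bases: str, ref_base: str,
                     min_af: float = 0.05, min_depth: int = 4) -> bool:
    counts = count_snv_alleles(pileup_bases)
    depth = sum(counts.values())
    alt = max((c for b, c in counts.items() if b != ref_base), default=0)
    return depth >= min_depth and alt / max(depth, 1) >= min_af

# 10 reads at a C reference position, 3 of them supporting an A alternative
print(is_snv_candidate('CCCCcccAAa', 'C'))  # True
```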
sb-ai-lab/HypEx
tests/test_aa.py
[ { "identifier": "AATest", "path": "hypex/ab_test/aa_tester.py", "snippet": "class AATest:\n \"\"\"\n A class for conducting AA testing (random split testing) to assess the\n statistical uniform of two samples.\n\n AA testing is used to validate that the splitting mechanism of an A/B test\n is unbiased and random. This class supports various statistical methods to\n evaluate the equivalence of two randomly split samples in terms of various\n metrics.\n\n Attributes:\n target_fields (Union[Iterable[str], str]): Target column names to analyze. This fields should be numeric.\n group_cols (Union[str, Iterable[str]]): Column names used for grouping. This fields should be categorical. It's a field for stratification. Stratification - the way to divide groups with equal number of categories in each of them.\n info_cols (Union[str, Iterable[str]]): Column names for additional information.\n quant_field (str): Name of the column for quantization. This fields should be categorical. A quantum is a category that passes entirely into one of the groups.\n mode (str): Mode of the AA test. Options are 'simple' and 'balanced'. 'simple' - naively splits groups in half. 'balanced' - separation with quantum balancing at each step (only used if a quantization field is specified.\n alpha (float): Level of significance for statistical tests.\n\n Methods:\n columns_labeling(data):\n Classifies columns in the input DataFrame as target or group columns.\n __simple_mode(data, random_state, test_size):\n Internal method to create a simple random split of the data.\n split(data, random_state, test_size):\n Splits the dataset into test and control groups.\n _postprep_data(data, spit_indexes):\n Combines the index markup obtained at the split step.\n calc_ab_delta(a_mean, b_mean, mode):\n Calculates the difference between averages of two samples.\n sampling_metrics(data, random_state, test_size):\n Computes various metrics for a single random split of the data.\n calc_uniform_tests(data, test_size, iterations, file_name, experiment_write_mode, split_write_mode, write_step, pbar):\n Runs multiple iterations of AA tests to find a uniform distribution.\n features_p_value_distribution(experiment_results, figsize, bin_step):\n Plots the distribution of p-values for each feature.\n aa_score(experiment_results):\n Computes the average score for passed tests in AA testing.\n uniform_tests_interpretation(experiment_results):\n Analyzes and plots the results of uniform tests.\n num_feature_uniform_analysis(control_data, test_data, plot_set):\n Analyzes and plots numerical feature distributions in control and test data.\n cat_feature_uniform_analysis(control_data, test_data):\n Analyzes and plots categorical feature distributions in control and test data.\n experiment_result_transform(experiment):\n Transforms the result of an experiment into a readable format.\n split_analysis(splited_data):\n Analyzes split data for both target and group columns.\n get_resume(aa_score, best_experiment_stat):\n Formats the final results of AA testing for clarity.\n process(data, optimize_groups, iterations, show_plots, test_size, pbar):\n Main method to perform the complete AA test process, including optimization, testing, and result presentation.\n\n Example:\n >>> aa_test = AATest(target_fields=[\"metric1\", \"metric2\"], group_cols=[\"group\"], info_cols=[\"info1\", \"info2\"])\n >>> results = aa_test.process(data, optimize_groups=True, iterations=1000, show_plots=True)\n \"\"\"\n\n def __init__(\n self,\n target_fields: Union[Iterable[str], 
str] = None,\n group_cols: Union[str, Iterable[str]] = None,\n info_cols: Union[str, Iterable[str]] = None,\n quant_field: str = None,\n mode: str = \"simple\",\n alpha: float = 0.05,\n ):\n \"\"\"Initialize the AATest class.\n\n Args:\n target_fields:\n List or str with target columns. This fields should be numeric.\n group_cols:\n List or str with columns for grouping. This fields should be categorical. It's a field for stratification. Stratification - the way to divide groups with equal number of categories in each of them.\n info_cols:\n List or str with informational columns\n quant_field:\n String with name of column for quantization. This fields should be categorical. A quantum is a category that passes entirely into one of the groups.\n mode:\n Mode of the AA-test\n Available modes:\n * simple - naively splits groups in half\n * balanced - separation with quantum balancing at each step (only used if a quantization field is specified)\n alpha:\n Level of significance\n \"\"\"\n self.target_fields = (\n [target_fields] if isinstance(target_fields, str) else target_fields\n )\n self.group_cols = (\n [group_cols] if isinstance(group_cols, str) else group_cols\n ) or []\n self.info_cols = [info_cols] if isinstance(info_cols, str) else info_cols\n self.quant_field = quant_field\n self.mode = mode\n self.alpha = alpha\n\n def columns_labeling(self, data: pd.DataFrame) -> Dict[str, List[str]]:\n \"\"\"\n Label columns as target columns and group columns.\n\n Args:\n data:\n Input dataframe\n\n Returns:\n Dictionary with list of target columns and group columns\n\n \"\"\"\n return {\n \"target_field\": list(\n data.select_dtypes(include=\"number\").columns.drop(\n self.info_cols, errors=\"ignore\"\n )\n ),\n \"group_col\": list(\n data.select_dtypes(include=\"object\").columns.drop(\n self.info_cols, errors=\"ignore\"\n )\n ),\n }\n\n def __simple_mode(\n self, data: pd.DataFrame, random_state: int = None, test_size: float = 0.5\n ) -> Dict:\n \"\"\"Separates data on A and B samples within simple mode.\n Separation performed to divide groups of equal sizes - equal amount of records\n or equal amount of groups in each sample.\n\n Args:\n data:\n Input data\n random_state:\n Seed of random\n\n Returns:\n Test and control samples of indexes dictionary\n \"\"\"\n result = {\"test_indexes\": [], \"control_indexes\": []}\n\n if self.quant_field:\n random_ids = shuffle(\n data[self.quant_field].unique(), random_state=random_state\n )\n edge = int(len(random_ids) * test_size)\n result[\"test_indexes\"] = list(\n data[data[self.quant_field].isin(random_ids[:edge])].index\n )\n result[\"control_indexes\"] = list(\n data[data[self.quant_field].isin(random_ids[edge:])].index\n )\n\n else:\n addition_indexes = list(shuffle(data.index, random_state=random_state))\n edge = int(len(addition_indexes) * test_size)\n result[\"test_indexes\"] = addition_indexes[:edge]\n result[\"control_indexes\"] = addition_indexes[edge:]\n\n return result\n\n def split(\n self, data: pd.DataFrame, random_state: int = None, test_size: float = 0.5\n ) -> Dict:\n \"\"\"Divides sample on two groups.\n\n Args:\n data:\n Input data\n random_state:\n Seed of random - one integer to fix split\n test_size:\n Proportion of the test group\n\n Returns:\n Dict of indexes with division on test and control group\n \"\"\"\n result = {\"test_indexes\": [], \"control_indexes\": []}\n\n if self.group_cols:\n groups = data.groupby(self.group_cols)\n for _, gd in groups:\n if self.mode not in (\"balanced\", \"simple\"):\n warnings.warn(\n f\"The 
mode '{self.mode}' is not supported for group division. Implemented mode 'simple'.\"\n )\n self.mode = \"simple\"\n\n if self.mode == \"simple\":\n t_result = self.__simple_mode(gd, random_state, test_size)\n result[\"test_indexes\"] += t_result[\"test_indexes\"]\n result[\"control_indexes\"] += t_result[\"control_indexes\"]\n\n elif self.mode == \"balanced\":\n if self.quant_field:\n random_ids = shuffle(\n gd[self.quant_field].unique(), random_state=random_state\n )\n addition_indexes = list(\n gd[gd[self.quant_field].isin(random_ids)].index\n )\n else:\n addition_indexes = list(\n shuffle(gd.index, random_state=random_state)\n )\n\n if len(result[\"control_indexes\"]) > len(result[\"test_indexes\"]):\n result[\"test_indexes\"] += addition_indexes\n else:\n result[\"control_indexes\"] += addition_indexes\n\n else:\n if self.mode != \"simple\":\n warnings.warn(\n f\"The mode '{self.mode}' is not supported for regular division. \"\n f\"Implemented mode 'simple'.\"\n )\n\n t_result = self.__simple_mode(data, random_state, test_size)\n result[\"test_indexes\"] = t_result[\"test_indexes\"]\n result[\"control_indexes\"] = t_result[\"control_indexes\"]\n\n result[\"test_indexes\"] = list(set(result[\"test_indexes\"]))\n result[\"control_indexes\"] = list(set(result[\"control_indexes\"]))\n\n return result\n\n @staticmethod\n def _postprep_data(data, spit_indexes: Dict = None) -> pd.DataFrame:\n \"\"\"Prepares data to show user.\n Adds info_cols and decode binary variables.\n\n Args:\n data:\n Input data\n spit_indexes:\n Dict of indexes with separation on test and control group\n\n Returns:\n Separated initial data with column \"group\"\n \"\"\"\n test = data.loc[spit_indexes[\"test_indexes\"]]\n control = data.loc[spit_indexes[\"control_indexes\"]]\n data = merge_groups(control, test)\n\n return data\n\n @staticmethod\n def calc_ab_delta(a_mean: float, b_mean: float, mode: str = \"percentile\")->float:\n \"\"\"Calculates target delta between A and B groups.\n\n Args:\n a_mean:\n Average of target in one group\n b_mean:\n Average of target in another group\n mode:\n Type of expected result:\n * 'percentile' - percentage exceeding the average in group A compared to group B\n * 'absolute' - absolute value of difference between B and A group\n * 'relative' - percent in format of number (absolute) exceeding the average in group A compared to group B\n\n Returns:\n Delta between groups as percent or absolute value\n \"\"\"\n if mode == \"percentile\":\n return (1 - a_mean / b_mean) * 100\n if mode == \"absolute\":\n return b_mean - a_mean\n if mode == \"relative\":\n return 1 - a_mean / b_mean\n\n def sampling_metrics(\n self, data: pd.DataFrame, random_state: int = None, test_size: float = 0.5\n ) -> Dict:\n \"\"\"Calculates metrics of one sampling.\n\n Args:\n data:\n Input data\n random_state:\n Random seeds for searching\n test_size:\n Proportion of the test group\n\n Returns:\n Dict of\n 1) metrics dataframe (stat tests) and\n 2) dict of random state with test_control dataframe\n \"\"\"\n scores = []\n t_result = {\"random_state\": random_state}\n\n split = self.split(data, random_state, test_size)\n\n a = data.loc[split[\"control_indexes\"]]\n b = data.loc[split[\"test_indexes\"]]\n\n data_from_sampling_dict = {random_state: self._postprep_data(data, split)}\n for tf in self.target_fields:\n ta = a[tf]\n tb = b[tf]\n\n t_result[f\"{tf} a mean\"] = ta.mean()\n t_result[f\"{tf} b mean\"] = tb.mean()\n t_result[f\"{tf} ab delta\"] = self.calc_ab_delta(\n t_result[f\"{tf} a mean\"], 
t_result[f\"{tf} b mean\"], \"absolute\"\n )\n t_result[f\"{tf} ab delta %\"] = self.calc_ab_delta(\n t_result[f\"{tf} a mean\"], t_result[f\"{tf} b mean\"], \"percentile\"\n )\n t_result[f\"{tf} t-test p-value\"] = ttest_ind(\n ta, tb, nan_policy=\"omit\"\n ).pvalue\n t_result[f\"{tf} ks-test p-value\"] = ks_2samp(ta, tb).pvalue\n t_result[f\"{tf} t-test passed\"] = (\n t_result[f\"{tf} t-test p-value\"] < self.alpha\n )\n t_result[f\"{tf} ks-test passed\"] = (\n t_result[f\"{tf} ks-test p-value\"] < self.alpha\n )\n scores.append(\n (\n t_result[f\"{tf} t-test p-value\"]\n + 2 * t_result[f\"{tf} ks-test p-value\"]\n )\n / 3\n )\n\n t_result[\"control %\"] = len(a) / len(data) * 100\n t_result[\"test %\"] = len(b) / len(data) * 100\n t_result[\"control size\"] = len(a)\n t_result[\"test size\"] = len(b)\n t_result[\"t-test mean p-value\"] = np.mean(\n [p_value for key, p_value in t_result.items() if \"t-test p-value\" in key]\n )\n t_result[\"ks-test mean p-value\"] = np.mean(\n [p_value for key, p_value in t_result.items() if \"ks-test p-value\" in key]\n )\n t_result[\"t-test passed %\"] = np.mean(\n [passed * 100 for key, passed in t_result.items() if \"t-test passed\" in key]\n )\n t_result[\"ks-test passed %\"] = np.mean(\n [\n passed * 100\n for key, passed in t_result.items()\n if \"ks-test passed\" in key\n ]\n )\n t_result[\"mean_tests_score\"] = np.mean(scores)\n return {\"metrics\": t_result, \"data_from_experiment\": data_from_sampling_dict}\n\n def calc_uniform_tests(\n self,\n data: pd.DataFrame,\n test_size: float = 0.5,\n iterations: int = 2000,\n file_name: Union[Path, str] = None,\n experiment_write_mode: str = \"full\",\n split_write_mode: str = \"full\",\n write_step: int = None,\n pbar: bool = True,\n **kwargs,\n ) -> Optional[Tuple[pd.DataFrame, Dict[Any, Dict]]]:\n \"\"\"Performs multiple separation experiments for different random states.\n\n Args:\n data:\n Input data\n iterations:\n Number of iterations to search uniform sampling to searching\n test_size:\n Proportion of the test group\n file_name:\n Name of file to save results (if None - no results will be saved, func returns result)\n experiment_write_mode:\n Mode to write experiment results:\n 'full' - save all experiments\n 'all' - save experiments that passed all statistical tests\n 'any' - save experiments that passed any statistical test\n split_write_mode:\n Mode to write split results:\n 'full' - save all experiments\n 'all' - save experiments that passed all statistical tests\n 'any' - save experiments that passed any statistical test\n write_step:\n Step to write experiments to file\n pbar:\n Flag to show progress bar\n\n Returns:\n If no saving (no file_name, no write mode and no write_step) returns dataframe\n else None and saves file to csv\n \"\"\"\n random_states = range(iterations)\n results = []\n data_from_sampling = {}\n\n if experiment_write_mode not in (\"full\", \"all\", \"any\"):\n warnings.warn(\n f\"Write mode '{experiment_write_mode}' is not supported. Mode 'full' will be used\"\n )\n experiment_write_mode = \"full\"\n if split_write_mode not in (\"full\", \"all\", \"any\"):\n warnings.warn(\n f\"Write mode '{split_write_mode}' is not supported. 
Mode 'full' will be used\"\n )\n split_write_mode = \"full\"\n\n for i, rs in tqdm(\n enumerate(random_states), total=len(random_states), disable=not pbar\n ):\n res = self.sampling_metrics(data, random_state=rs, test_size=test_size)\n\n # write to file\n passed = []\n for tf in self.target_fields:\n passed += [\n not res[\"metrics\"][f\"{tf} t-test passed\"],\n not res[\"metrics\"][f\"{tf} ks-test passed\"],\n ]\n\n if all(passed):\n if experiment_write_mode == \"all\":\n results.append(res[\"metrics\"])\n if split_write_mode == \"all\":\n data_from_sampling.update(res[\"data_from_experiment\"])\n if any(passed):\n if experiment_write_mode == \"any\":\n results.append(res[\"metrics\"])\n if split_write_mode == \"any\":\n data_from_sampling.update(res[\"data_from_experiment\"])\n if experiment_write_mode == \"full\":\n results.append(res[\"metrics\"])\n if split_write_mode == \"full\":\n data_from_sampling.update(res[\"data_from_experiment\"])\n\n if file_name and write_step:\n if i == write_step:\n pd.DataFrame(results).to_csv(file_name, index=False)\n elif i % write_step == 0:\n pd.DataFrame(results).to_csv(\n file_name, index=False, header=False, mode=\"a\"\n )\n results = []\n\n results = pd.DataFrame(results)\n if file_name and write_step:\n results.to_csv(file_name, index=False, header=False, mode=\"a\")\n elif file_name:\n results.to_csv(file_name, index=False)\n return results, data_from_sampling\n else:\n return results, data_from_sampling\n\n def features_p_value_distribution(\n self, experiment_results: pd.DataFrame, figsize=None, bin_step=0.05\n ):\n \"\"\"Process plots of features' p-value distribution.\n\n Args:\n experiment_results:\n Results of experiments\n figsize:\n Size of figure for plot\n bin_step:\n Step for bins in X axis\n \"\"\"\n feature_num = len(self.target_fields)\n figsize = figsize or (15, 7 * feature_num)\n bin_step = bin_step or self.alpha\n bins = np.arange(0, 1 + bin_step, bin_step)\n figure, axs = plt.subplots(nrows=feature_num, ncols=2, figsize=figsize)\n for i in range(feature_num):\n sns.histplot(\n data=experiment_results,\n x=f\"{self.target_fields[i]} t-test p-value\",\n ax=axs[i, 0],\n bins=bins,\n stat=\"percent\",\n shrink=0.8,\n )\n sns.histplot(\n data=experiment_results,\n x=f\"{self.target_fields[i]} ks-test p-value\",\n ax=axs[i, 1],\n bins=bins,\n stat=\"percent\",\n shrink=0.8,\n )\n\n axs[i, 0].set_title(\n f\"{self.target_fields[i]} t-test p-value\\npassed score: {experiment_results[f'{self.target_fields[i]} t-test passed'].mean():.3f}\"\n )\n axs[i, 1].set_title(\n f\"{self.target_fields[i]} ks-test p-value\\npassed score: {experiment_results[f'{self.target_fields[i]} ks-test passed'].mean():.3f}\"\n )\n plt.show()\n\n def aa_score(self, experiment_results: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Estimates mean passed score for t-test and ks-test in AA-test.\n\n Args:\n experiment_results:\n Results of the experiment\n\n Returns:\n Pandas dataframe containing the results of the AA-test\n \"\"\"\n result = pd.DataFrame(\n {\n f: {\n \"t-test passed score\": experiment_results[\n f\"{f} t-test passed\"\n ].mean(),\n \"ks-test passed score\": experiment_results[\n f\"{f} ks-test passed\"\n ].mean(),\n }\n for f in self.target_fields\n }\n ).T\n\n result[\"t-test aa passed\"] = result[\"t-test passed score\"].apply(\n lambda x: 0.8 * self.alpha <= x <= 1.2 * self.alpha\n )\n result[\"ks-test aa passed\"] = result[\"ks-test passed score\"].apply(\n lambda x: 0.8 * self.alpha <= x <= 1.2 * self.alpha\n )\n result.loc[\"mean\"] = 
result.mean()\n\n return result\n\n def uniform_tests_interpretation(\n self, experiment_results: pd.DataFrame, **kwargs\n ) -> pd.DataFrame:\n \"\"\"Process plotting of p-value distribution and results of AA-test.\n\n Args:\n experiment_results:\n Results of experiments\n **kwargs:\n Some extra keyword arguments:\n * figsize: Size of figure for plot\n * bin_step: Step for bins in X axis\n\n Returns:\n Pandas dataframe containing the results of the AA-test\n \"\"\"\n self.features_p_value_distribution(\n experiment_results,\n figsize=kwargs.get(\"figsize\"),\n bin_step=kwargs.get(\"bin_step\"),\n )\n return self.aa_score(experiment_results)\n\n def num_feature_uniform_analysis(\n self,\n control_data: pd.Series,\n test_data: pd.Series,\n plot_set: Tuple = (\"hist\", \"cumulative\", \"percentile\"),\n **kwargs,\n ):\n \"\"\"Show plots of distribution in groups with uniform tests.\n\n Args:\n control_data:\n Data from control group\n test_data:\n Data from test group\n plot_set:\n Type of plot\n Available types:\n * hist\n * cumulative\n * percentile\n **kwargs:\n Some extra keyword arguments:\n * figsize: Size of figure for plot\n * bins: Number of bins in X axis\n * alpha: Transparency of histograms\n \"\"\"\n if not plot_set:\n return\n\n figsize = kwargs.get(\"figsize\", (25, 20))\n figure, axs = plt.subplots(\n nrows=len(plot_set),\n ncols=1,\n figsize=figsize,\n facecolor=\"honeydew\",\n edgecolor=\"black\",\n )\n ax_count = 0\n\n bins = np.arange(\n min(control_data.min(), test_data.min()),\n max(control_data.max(), test_data.max()),\n (\n max(control_data.max(), test_data.max())\n - min(control_data.min(), test_data.min())\n )\n / kwargs.get(\"bins\", 100),\n )\n\n if \"hist\" in plot_set:\n sns.histplot(\n data=control_data,\n ax=axs[ax_count],\n bins=bins,\n stat=\"percent\",\n element=\"poly\",\n alpha=kwargs.get(\"alpha\", 0.3),\n color=\"blue\",\n )\n sns.histplot(\n data=test_data,\n ax=axs[ax_count],\n bins=bins,\n stat=\"percent\",\n element=\"poly\",\n alpha=kwargs.get(\"alpha\", 0.3),\n color=\"red\",\n )\n axs[ax_count].grid(True)\n axs[ax_count].legend([\"control\", \"test\"])\n axs[ax_count].set_title(\"Histogram\")\n ax_count += 1\n\n if \"cumulative\" in plot_set:\n sns.histplot(\n data=control_data,\n ax=axs[ax_count],\n bins=bins,\n stat=\"percent\",\n element=\"poly\",\n cumulative=True,\n alpha=kwargs.get(\"alpha\", 0.3),\n color=\"blue\",\n )\n sns.histplot(\n data=test_data,\n ax=axs[ax_count],\n bins=bins,\n stat=\"percent\",\n element=\"poly\",\n cumulative=True,\n alpha=kwargs.get(\"alpha\", 0.3),\n color=\"red\",\n )\n axs[ax_count].legend([\"control\", \"test\"])\n axs[ax_count].set_title(\"Cumulative destribution\")\n ax_count += 1\n\n if \"percentile\" in plot_set:\n axs[ax_count].fill_between(\n range(101),\n [control_data.quantile(q) for q in np.arange(0, 1.01, 0.01)],\n color=\"blue\",\n alpha=kwargs.get(\"alpha\", 0.3),\n )\n axs[ax_count].fill_between(\n range(101),\n [test_data.quantile(q) for q in np.arange(0, 1.01, 0.01)],\n color=\"red\",\n alpha=kwargs.get(\"alpha\", 0.3),\n )\n axs[ax_count].legend([\"control\", \"test\"])\n axs[ax_count].set_xticks(np.arange(0, 101))\n axs[ax_count].set_xticklabels(np.arange(0, 101), rotation=45)\n axs[ax_count].set_title(\"Percentile destribution\")\n\n fig_title = f\"\"\"{control_data.name}\n\n t-test p-value: {ttest_ind(control_data, test_data, nan_policy='omit').pvalue:.3f}\n ks-test p-value: {ks_2samp(control_data, test_data).pvalue:.3f}\"\"\"\n figure.suptitle(fig_title, 
fontsize=kwargs.get(\"title_size\", 20))\n plt.show()\n\n def cat_feature_uniform_analysis(\n self, control_data: pd.Series, test_data: pd.Series, **kwargs\n ):\n \"\"\"Show plots of distribution in groups.\n\n Args:\n control_data:\n Data from control group\n test_data:\n Data from test group\n **kwargs:\n Some extra keyword arguments:\n * figsize: Size of figure for plot\n * alpha: Transparency of histograms\n \"\"\"\n s_control_data = control_data.astype(\"str\")\n s_test_data = test_data.astype(\"str\")\n\n figsize = kwargs.get(\"figsize\", (15, 10))\n figure, ax = plt.subplots(\n nrows=1, ncols=1, figsize=figsize, facecolor=\"honeydew\", edgecolor=\"black\"\n )\n\n control_counts = s_control_data.value_counts(normalize=True) * 100\n test_counts = s_test_data.value_counts(normalize=True) * 100\n\n ax.fill_between(\n control_counts.index,\n control_counts.values,\n color=\"blue\",\n alpha=kwargs.get(\"alpha\", 0.3),\n label=\"control\",\n )\n ax.fill_between(\n test_counts.index,\n test_counts[\n [i for i in test_counts.index if i in control_counts.index]\n ].values,\n color=\"red\",\n alpha=kwargs.get(\"alpha\", 0.3),\n label=\"test\",\n )\n\n ax.legend()\n ax.tick_params(axis=\"x\", rotation=90)\n figure.suptitle(f\"{control_data.name}\", fontsize=kwargs.get(\"title_size\", 20))\n plt.show()\n\n def experiment_result_transform(self, experiment: pd.Series):\n \"\"\"\n Transform experiments results into readable view.\n\n Args:\n experiment:\n Results of experiments\n\n Returns:\n DataFrame with results of the experiment and statistics from best split\n \"\"\"\n targets_dict = {}\n for tf in self.target_fields:\n targets_dict[tf] = {}\n for i in experiment.index:\n if i.startswith(f\"{tf} \"):\n targets_dict[tf][i[len(tf) + 1 :]] = experiment[i]\n return pd.DataFrame(targets_dict).T, experiment.iloc[-9:]\n\n def split_analysis(self, splited_data: pd.DataFrame, **kwargs):\n \"\"\"Conducts a full splitting analysis.\n\n Args:\n splited_data:\n Data that has already been split\n **kwargs:\n Some extra keyword arguments for plots in visualization\n \"\"\"\n ssp = split_splited_data(splited_data)\n for nf in self.target_fields:\n self.num_feature_uniform_analysis(\n ssp[\"control\"][nf], ssp[\"test\"][nf], **kwargs\n )\n for cf in self.group_cols:\n self.cat_feature_uniform_analysis(\n ssp[\"control\"][cf], ssp[\"test\"][cf], **kwargs\n )\n\n def get_resume(self, aa_score: pd.DataFrame, best_experiment_stat: pd.DataFrame):\n \"\"\"Format results into clear format for understanding.\n\n Args:\n aa_score:\n Results of aa-test\n best_experiment_stat:\n Results of the best experiment\n\n Returns:\n DataFrame with OK and not OK depending on the results of statistical tests\n \"\"\"\n result = {\"aa test passed\": {}, \"split is uniform\": {}}\n for field in self.target_fields:\n result[\"aa test passed\"][field] = (\n aa_score.loc[field, \"t-test aa passed\"]\n or aa_score.loc[field, \"ks-test aa passed\"]\n )\n result[\"split is uniform\"][field] = (\n best_experiment_stat.loc[field, \"t-test passed\"]\n or best_experiment_stat.loc[field, \"ks-test passed\"]\n )\n result = pd.DataFrame(result)\n result[\"split is uniform\"] = (\n result[\"split is uniform\"]\n .astype(\"bool\")\n .replace({False: \"OK\", True: \"not OK\"})\n )\n result[\"aa test passed\"] = (\n result[\"aa test passed\"]\n .astype(\"bool\")\n .replace({False: \"not OK\", True: \"OK\"})\n )\n return result\n\n def process(\n self,\n data: pd.DataFrame,\n optimize_groups: bool = False,\n iterations: int = 2000,\n show_plots: 
bool=True,\n test_size: float=0.5,\n pbar: bool=True,\n **kwargs,\n ):\n \"\"\"Main function for AATest estimation.\n\n Provides:\n * Columns labeling\n * Results calculations\n * Plotting results\n\n Args:\n test_size:\n Proportion of the test group\n data:\n Input dataset\n optimize_groups:\n Is in necessary to optimize groups\n iterations:\n Number of iterations for AA-test\n show_plots:\n Is in necessary to show plots\n pbar:\n Show progress-bar\n **kwargs:\n Some extra keyword arguments\n\n Returns:\n best_results:\n Results of the experiment with metrics for all fields\n best_split:\n Result of separation\n \"\"\"\n labeling = self.columns_labeling(data)\n best_results, best_split = None, None\n\n if not self.target_fields:\n self.target_fields = labeling[\"target_fields\"]\n\n if optimize_groups:\n max_score = -1\n\n group_variants = [[]]\n for i in range(1, len(labeling[\"group_col\"])):\n i_combinations = combinations(labeling[\"group_col\"], i)\n group_variants.extend(iter(i_combinations))\n\n for gs in tqdm(group_variants, desc=\"Group optimization\", disable=not pbar):\n self.group_cols = list(gs)\n experiment_results, data_splits = self.calc_uniform_tests(\n data,\n pbar=False,\n iterations=iterations,\n test_size=test_size,\n **kwargs,\n )\n if len(experiment_results):\n aa_scores = self.aa_score(experiment_results)\n group_score = max(\n aa_scores.loc[\"mean\", \"t-test aa passed\"],\n aa_scores.loc[\"mean\", \"ks-test aa passed\"],\n )\n if group_score > max_score:\n best_results, best_split = experiment_results, data_splits\n max_score = group_score\n\n else:\n best_results, best_split = self.calc_uniform_tests(\n data,\n experiment_write_mode=\"full\",\n split_write_mode=\"any\",\n iterations=iterations,\n test_size=test_size,\n pbar=pbar,\n **kwargs,\n )\n\n if len(best_results) == 0:\n return best_results, best_split\n if len(best_results) > 0:\n if show_plots:\n aa_scores = self.uniform_tests_interpretation(best_results)\n else:\n aa_scores = self.aa_score(best_results)\n best_rs = best_results.loc[\n best_results[\"mean_tests_score\"].idxmax(), \"random_state\"\n ]\n final_split = best_split[best_rs]\n if show_plots:\n self.split_analysis(final_split, **kwargs)\n\n best_experiment_stat, best_split_stat = self.experiment_result_transform(\n best_results[best_results[\"random_state\"] == best_rs].iloc[0]\n )\n resume = self.get_resume(aa_scores, best_experiment_stat)\n else:\n aa_scores = None\n final_split = None\n best_experiment_stat = None\n best_split_stat = None\n resume = None\n\n return {\n \"experiments\": best_results,\n \"aa_score\": aa_scores,\n \"split\": final_split,\n \"best_experiment_stat\": best_experiment_stat,\n \"split_stat\": best_split_stat,\n \"resume\": resume,\n }" }, { "identifier": "create_test_data", "path": "hypex/utils/tutorial_data_creation.py", "snippet": "def create_test_data(\n num_users: int = 10000,\n na_step: Union[Iterable[int], int] = None,\n nan_cols: Union[Iterable[str], str] = None,\n file_name: str = None,\n rs=None\n):\n \"\"\"Creates data for tutorial.\n\n Args:\n num_users: num of strings\n na_step: \n num or list of nums of period to make NaN (step of range)\n If list - iterates accordingly order of columns\n nan_cols: \n name of one or several columns to fill with NaN\n If list - iterates accordingly order of na_step\n file_name: name of file to save; doesn't save file if None\n\n Returns:\n data: dataframe with\n \"\"\"\n if rs is not None:\n np.random.seed(rs)\n\n if (nan_cols is not None) and isinstance(nan_cols, 
str):\n nan_cols = [nan_cols]\n # Simulating dataset with known effect size\n num_months = 12\n\n # signup_months == 0 means customer did not sign up\n signup_months = np.random.choice(np.arange(1, num_months), num_users) * np.random.randint(0, 2, size=num_users)\n\n data = pd.DataFrame(\n {\n \"user_id\": np.repeat(np.arange(num_users), num_months),\n \"signup_month\": np.repeat(signup_months, num_months), # signup month == 0 means customer did not sign up\n \"month\": np.tile(np.arange(1, num_months + 1), num_users), # months are from 1 to 12\n \"spend\": np.random.poisson(500, num_users * num_months),\n }\n )\n\n # A customer is in the treatment group if and only if they signed up\n data[\"treat\"] = data[\"signup_month\"] > 0\n\n # Simulating an effect of month (monotonically decreasing--customers buy less later in the year)\n data[\"spend\"] = data[\"spend\"] - data[\"month\"] * 10\n\n # Simulating a simple treatment effect of 100\n after_signup = (data[\"signup_month\"] < data[\"month\"]) & (data[\"treat\"])\n data.loc[after_signup, \"spend\"] = data[after_signup][\"spend\"] + 100\n\n # Setting the signup month (for ease of analysis)\n i = 3\n data = (\n data[data.signup_month.isin([0, i])]\n .groupby([\"user_id\", \"signup_month\", \"treat\"])\n .apply(\n lambda x: pd.Series(\n {\"pre_spends\": x.loc[x.month < i, \"spend\"].mean(), \"post_spends\": x.loc[x.month > i, \"spend\"].mean(), }\n )\n )\n .reset_index()\n )\n\n # Additional category features\n gender_i = np.random.choice(a=[0, 1], size=data.user_id.nunique())\n gender = [[\"M\", \"F\"][i] for i in gender_i]\n\n age = np.random.choice(a=range(18, 70), size=data.user_id.nunique())\n\n industry_i = np.random.choice(a=range(1, 3), size=data.user_id.nunique())\n industry_names = [\"Finance\", \"E-commerce\", \"Logistics\"]\n industry = [industry_names[i] for i in industry_i]\n\n data[\"age\"] = age\n data[\"gender\"] = gender\n data[\"industry\"] = industry\n data[\"industry\"] = data[\"industry\"].astype(\"str\")\n data[\"treat\"] = data[\"treat\"].astype(int)\n\n # input nans in data if needed\n data = set_nans(data, na_step, nan_cols)\n\n if file_name is not None:\n data.to_csv(ROOT / f\"{file_name}.csv\", index=False)\n\n return data" } ]
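The `__simple_mode` and `split` docstrings in the context above describe the "simple" AA split: shuffle the row index (or whole quanta) and cut it at `test_size`. As a minimal sketch of that idea only (the `simple_split` helper is invented here and the quantization and stratification branches are omitted), see below; the HypEx code itself lives in the snippet above.

```python
import pandas as pd
from sklearn.utils import shuffle

def simple_split(data: pd.DataFrame, test_size: float = 0.5, random_state: int = 0) -> dict:
    # shuffle the row index and cut it at the requested test proportion
    idx = list(shuffle(data.index, random_state=random_state))
    edge = int(len(idx) * test_size)
    return {"test_indexes": idx[:edge], "control_indexes": idx[edge:]}

df = pd.DataFrame({"x": range(10)})
parts = simple_split(df, test_size=0.5, random_state=42)
assert sorted(parts["test_indexes"] + parts["control_indexes"]) == list(df.index)
print(len(parts["test_indexes"]), len(parts["control_indexes"]))  # 5 5
```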
import pandas as pd
import pytest
import sys
from pathlib import Path
from hypex import AATest
from hypex.utils.tutorial_data_creation import create_test_data
9,093
sys.path.append(str(Path(".").absolute().parent)) @pytest.fixture def data(): return create_test_data(rs=52) @pytest.fixture def iterations(): return 20 @pytest.fixture def info_col(): return "user_id" def test_aa_simple(data, iterations, info_col):
sys.path.append(str(Path(".").absolute().parent)) @pytest.fixture def data(): return create_test_data(rs=52) @pytest.fixture def iterations(): return 20 @pytest.fixture def info_col(): return "user_id" def test_aa_simple(data, iterations, info_col):
model = AATest(target_fields=["pre_spends", "post_spends"], info_cols=info_col)
0
2023-11-01 08:58:57+00:00
12k
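Based on this record's fixtures, its `next_line`, and the `AATest.process` API documented in the context above (a dict with "experiments", "aa_score", "split", "best_experiment_stat", "split_stat", "resume"), the test body plausibly continues along these lines. This is a hedged sketch: the `iterations=20` value and the `"user_id"` info column come from the record's fixtures, while the specific assertions are guesses and may differ from the repository's actual test.

```python
import pandas as pd

from hypex import AATest
from hypex.utils.tutorial_data_creation import create_test_data

data = create_test_data(rs=52)
model = AATest(target_fields=["pre_spends", "post_spends"], info_cols="user_id")
results = model.process(data, iterations=20, show_plots=False, pbar=False)

# process() is documented to return a dict of result tables
assert {"experiments", "aa_score", "split", "resume"} <= set(results)
assert isinstance(results["experiments"], pd.DataFrame)
assert len(results["experiments"]) == 20  # one row per iteration in "full" write mode
```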
mileswyn/SAMIHS
models/segment_anything_samihs/automatic_mask_generator.py
[ { "identifier": "Samihs", "path": "models/segment_anything_samihs/modeling/samihs.py", "snippet": "class Samihs(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n ####################################################\n # for param in self.prompt_encoder.parameters():\n # param.requires_grad = False\n # for param in self.mask_decoder.parameters():\n # param.requires_grad = False\n ####################################################\n # for param in self.image_encoder.parameters():\n # param.requires_grad = False\n for n, value in self.image_encoder.named_parameters():\n if \"down_projection\" not in n and \"Adapter\" not in n:\n value.requires_grad = False\n if \"down_projection\" in n or \"adapter\" in n:\n value.requires_grad = True\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n @torch.no_grad()\n def forward_sam(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. 
Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input prompts,\n C is determined by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack([self.preprocess(x[\"image\"]) for x in batched_input], dim=0)\n image_embeddings, skip_cache = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n encoder_cache=skip_cache,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n\n def forward(\n self, \n imgs: torch.Tensor,\n pt: Tuple[torch.Tensor, torch.Tensor], # [b n 2, b n]\n bbox: torch.Tensor=None, # b 4\n ) -> torch.Tensor:\n # imge, skip_cache = self.image_encoder(imgs)\n imge = self.image_encoder(imgs)\n if len(pt[0].shape) == 3:\n se, de = self.prompt_encoder( # se b 2 256, de b 256 32 32\n points=pt,\n boxes=None,\n masks=None,\n )\n low_res_masks, _ = self.mask_decoder( # low_res_mask b 1 128 128\n image_embeddings=imge,\n image_pe=self.prompt_encoder.get_dense_pe(), \n sparse_prompt_embeddings=se,\n dense_prompt_embeddings=de, \n multimask_output=False,\n # encoder_cache=skip_cache,\n )\n masks = F.interpolate(low_res_masks, (256, 256), mode=\"bilinear\", align_corners=False)\n outputs = {\"low_res_logits\": low_res_masks, \"masks\": low_res_masks} # 10.10\n return outputs\n else:\n low_res_masks, masks = [], []\n for i in range(pt[0].shape[1]):\n pti = (pt[0][:, i, :, :], pt[1][:, i, :])\n sei, dei = self.prompt_encoder( # se b 2 256, de b 256 32 32\n points=pti,\n boxes=None,\n masks=None,\n )\n low_res_masksi, _ = self.mask_decoder( # low_res_mask b 1 128 128\n image_embeddings=imge,\n image_pe=self.prompt_encoder.get_dense_pe(), \n sparse_prompt_embeddings=sei,\n dense_prompt_embeddings=dei, \n multimask_output=False,\n )\n 
masksi = F.interpolate(low_res_masksi, (256, 256), mode=\"bilinear\", align_corners=False)\n low_res_masks.append(low_res_masksi)\n masks.append(masksi)\n low_res_masks = torch.stack(low_res_masks, dim=1)\n masks = torch.stack(masks, dim=1) # b c 1 255 255\n masks = masks.reshape(masks.shape[0], -1, masks.shape[3], masks.shape[4])\n low_res_masks = low_res_masks.reshape(low_res_masks.shape[0], -1, low_res_masks.shape[3], low_res_masks.shape[4])\n outputs = {\"low_res_logits\": low_res_masks, \"masks\": masks}\n return outputs\n\n\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(masks, original_size, mode=\"bilinear\", align_corners=False)\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x" }, { "identifier": "MaskData", "path": "models/segment_anything_samihs/utils/amg.py", "snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n for v in kwargs.values():\n assert isinstance(\n v, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats = dict(**kwargs)\n\n def __setitem__(self, key: str, item: Any) -> None:\n assert isinstance(\n item, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats[key] = item\n\n def __delitem__(self, key: str) -> None:\n del self._stats[key]\n\n def __getitem__(self, key: str) -> Any:\n return self._stats[key]\n\n def items(self) -> ItemsView[str, Any]:\n return self._stats.items()\n\n def filter(self, keep: torch.Tensor) -> None:\n for k, v in self._stats.items():\n if v is None:\n self._stats[k] = None\n elif isinstance(v, torch.Tensor):\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n elif isinstance(v, np.ndarray):\n self._stats[k] = v[keep.detach().cpu().numpy()]\n elif isinstance(v, list) and keep.dtype == torch.bool:\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n elif isinstance(v, list):\n self._stats[k] = [v[i] for i in keep]\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def cat(self, new_stats: \"MaskData\") -> None:\n for k, v in new_stats.items():\n if k not in self._stats or self._stats[k] is None:\n self._stats[k] = deepcopy(v)\n elif isinstance(v, torch.Tensor):\n 
self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n elif isinstance(v, np.ndarray):\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n elif isinstance(v, list):\n self._stats[k] = self._stats[k] + deepcopy(v)\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def to_numpy(self) -> None:\n for k, v in self._stats.items():\n if isinstance(v, torch.Tensor):\n self._stats[k] = v.detach().cpu().numpy()" }, { "identifier": "area_from_rle", "path": "models/segment_anything_samihs/utils/amg.py", "snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\n return sum(rle[\"counts\"][1::2])" }, { "identifier": "batch_iterator", "path": "models/segment_anything_samihs/utils/amg.py", "snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n assert len(args) > 0 and all(\n len(a) == len(args[0]) for a in args\n ), \"Batched iteration must have inputs of all the same size.\"\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n for b in range(n_batches):\n yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]" }, { "identifier": "batched_mask_to_box", "path": "models/segment_anything_samihs/utils/amg.py", "snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n \"\"\"\n # torch.max below raises an error on empty inputs, just skip in this case\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to CxHxW\n shape = masks.shape\n h, w = shape[-2:]\n if len(shape) > 2:\n masks = masks.flatten(0, -3)\n else:\n masks = masks.unsqueeze(0)\n\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + h * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + w * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n if len(shape) > 2:\n out = out.reshape(*shape[:-2], 4)\n else:\n out = out[0]\n\n return out" }, { "identifier": "box_xyxy_to_xywh", "path": "models/segment_anything_samihs/utils/amg.py", "snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n box_xywh = deepcopy(box_xyxy)\n box_xywh[2] = box_xywh[2] - box_xywh[0]\n box_xywh[3] = box_xywh[3] - box_xywh[1]\n return box_xywh" }, { "identifier": "build_all_layer_point_grids", "path": "models/segment_anything_samihs/utils/amg.py", "snippet": "def build_all_layer_point_grids(\n n_per_side: int, n_layers: int, scale_per_layer: int\n) -> List[np.ndarray]:\n \"\"\"Generates point grids for all crop layers.\"\"\"\n points_by_layer = []\n for i in range(n_layers + 1):\n n_points = 
int(n_per_side / (scale_per_layer**i))\n points_by_layer.append(build_point_grid(n_points))\n return points_by_layer" }, { "identifier": "calculate_stability_score", "path": "models/segment_anything_samihs/utils/amg.py", "snippet": "def calculate_stability_score(\n masks: torch.Tensor, mask_threshold: float, threshold_offset: float\n) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecessary cast to torch.int64\n intersections = (\n (masks > (mask_threshold + threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n unions = (\n (masks > (mask_threshold - threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n return intersections / unions" }, { "identifier": "coco_encode_rle", "path": "models/segment_anything_samihs/utils/amg.py", "snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n from pycocotools import mask as mask_utils # type: ignore\n\n h, w = uncompressed_rle[\"size\"]\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\") # Necessary to serialize with json\n return rle" }, { "identifier": "generate_crop_boxes", "path": "models/segment_anything_samihs/utils/amg.py", "snippet": "def generate_crop_boxes(\n im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float\n) -> Tuple[List[List[int]], List[int]]:\n \"\"\"\n Generates a list of crop boxes of different sizes. Each layer\n has (2**i)**2 boxes for the ith layer.\n \"\"\"\n crop_boxes, layer_idxs = [], []\n im_h, im_w = im_size\n short_side = min(im_h, im_w)\n\n # Original image\n crop_boxes.append([0, 0, im_w, im_h])\n layer_idxs.append(0)\n\n def crop_len(orig_len, n_crops, overlap):\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n for i_layer in range(n_layers):\n n_crops_per_side = 2 ** (i_layer + 1)\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n # Crops in XYWH format\n for x0, y0 in product(crop_box_x0, crop_box_y0):\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n crop_boxes.append(box)\n layer_idxs.append(i_layer + 1)\n\n return crop_boxes, layer_idxs" }, { "identifier": "is_box_near_crop_edge", "path": "models/segment_anything_samihs/utils/amg.py", "snippet": "def is_box_near_crop_edge(\n boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0\n) -> torch.Tensor:\n \"\"\"Filter masks at the edge of a crop, but not at the edge of the original image.\"\"\"\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n return torch.any(near_crop_edge, dim=1)" }, { 
"identifier": "mask_to_rle_pytorch", "path": "models/segment_anything_samihs/utils/amg.py", "snippet": "def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:\n \"\"\"\n Encodes masks to an uncompressed RLE, in the format expected by\n pycoco tools.\n \"\"\"\n # Put in fortran order and flatten h,w\n b, h, w = tensor.shape\n tensor = tensor.permute(0, 2, 1).flatten(1)\n\n # Compute change indices\n diff = tensor[:, 1:] ^ tensor[:, :-1]\n change_indices = diff.nonzero()\n\n # Encode run length\n out = []\n for i in range(b):\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n cur_idxs = torch.cat(\n [\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n cur_idxs + 1,\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),\n ]\n )\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n counts = [] if tensor[i, 0] == 0 else [0]\n counts.extend(btw_idxs.detach().cpu().tolist())\n out.append({\"size\": [h, w], \"counts\": counts})\n return out" }, { "identifier": "remove_small_regions", "path": "models/segment_anything_samihs/utils/amg.py", "snippet": "def remove_small_regions(\n mask: np.ndarray, area_thresh: float, mode: str\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Removes small disconnected regions and holes in a mask. Returns the\n mask and an indicator of if the mask has been modified.\n \"\"\"\n import cv2 # type: ignore\n\n assert mode in [\"holes\", \"islands\"]\n correct_holes = mode == \"holes\"\n working_mask = (correct_holes ^ mask).astype(np.uint8)\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n sizes = stats[:, -1][1:] # Row 0 is background label\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n if len(small_regions) == 0:\n return mask, False\n fill_labels = [0] + small_regions\n if not correct_holes:\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n # If every region is below threshold, keep largest\n if len(fill_labels) == 0:\n fill_labels = [int(np.argmax(sizes)) + 1]\n mask = np.isin(regions, fill_labels)\n return mask, True" }, { "identifier": "rle_to_mask", "path": "models/segment_anything_samihs/utils/amg.py", "snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n h, w = rle[\"size\"]\n mask = np.empty(h * w, dtype=bool)\n idx = 0\n parity = False\n for count in rle[\"counts\"]:\n mask[idx : idx + count] = parity\n idx += count\n parity ^= True\n mask = mask.reshape(w, h)\n return mask.transpose() # Put in C order" }, { "identifier": "uncrop_boxes_xyxy", "path": "models/segment_anything_samihs/utils/amg.py", "snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n # Check if boxes has a channel dimension\n if len(boxes.shape) == 3:\n offset = offset.unsqueeze(1)\n return boxes + offset" }, { "identifier": "uncrop_masks", "path": "models/segment_anything_samihs/utils/amg.py", "snippet": "def uncrop_masks(\n masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int\n) -> torch.Tensor:\n x0, y0, x1, y1 = crop_box\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n return masks\n # Coordinate transform masks\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n pad = (x0, pad_x - x0, y0, pad_y - y0)\n return torch.nn.functional.pad(masks, pad, value=0)" }, { "identifier": "uncrop_points", "path": 
"models/segment_anything_samihs/utils/amg.py", "snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0]], device=points.device)\n # Check if points has a channel dimension\n if len(points.shape) == 3:\n offset = offset.unsqueeze(1)\n return points + offset" } ]
import numpy as np import torch import cv2 # type: ignore # noqa: F401 from torchvision.ops.boxes import batched_nms, box_area # type: ignore from typing import Any, Dict, List, Optional, Tuple from .modeling import Samihs from .utils.amg import ( MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points, ) from pycocotools import mask as mask_utils # type: ignore # noqa: F401
8,703
Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. """ # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] elif self.output_mode == "binary_mask": mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] else: mask_data["segmentations"] = mask_data["rles"] # Write mask records curr_anns = [] for idx in range(len(mask_data["segmentations"])): ann = { "segmentation": mask_data["segmentations"][idx], "area": area_from_rle(mask_data["rles"][idx]), "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), "predicted_iou": mask_data["iou_preds"][idx].item(), "point_coords": [mask_data["points"][idx].tolist()], "stability_score": mask_data["stability_score"][idx].item(), "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. 
keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box)
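The crop-processing code above deduplicates masks with torchvision's batched_nms, passing an all-zero category tensor so the suppression is class-agnostic. A tiny illustration with made-up boxes and scores:

import torch
from torchvision.ops import batched_nms

boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                      [0.5, 0.5, 10.5, 10.5],    # heavy overlap with the first box
                      [20.0, 20.0, 30.0, 30.0]])
scores = torch.tensor([0.9, 0.8, 0.7])
# Zeros as the "category" index collapse batched_nms into plain NMS over all boxes,
# exactly as in the snippet above; 0.7 matches the generator's default box_nms_thresh.
keep = batched_nms(boxes, scores, torch.zeros_like(boxes[:, 0]), iou_threshold=0.7)
print(keep)  # tensor([0, 2]) -- the overlapping lower-score box is suppressed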
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class SamAutomaticMaskGenerator: def __init__( self, model: Samihs, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = "binary_mask", ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crop_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crop_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." 
if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." if output_mode == "coco_rle": if min_mask_region_area > 0: self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. 
""" # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] elif self.output_mode == "binary_mask": mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] else: mask_data["segmentations"] = mask_data["rles"] # Write mask records curr_anns = [] for idx in range(len(mask_data["segmentations"])): ann = { "segmentation": mask_data["segmentations"][idx], "area": area_from_rle(mask_data["rles"][idx]), "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), "predicted_iou": mask_data["iou_preds"][idx].item(), "point_coords": [mask_data["points"][idx].tolist()], "stability_score": mask_data["stability_score"][idx].item(), "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box)
data["points"] = uncrop_points(data["points"], crop_box)
16
2023-11-09 07:26:33+00:00
12k
tianhaowuhz/human-assisting-dex-grasp
Runners/TrainSDE_update.py
[ { "identifier": "loss_fn_cond", "path": "Algorithms/SDE_update.py", "snippet": "def loss_fn_cond(model, x, marginal_prob_fn, sde_fn, is_likelihood_weighting=False, eps=1e-5, device='cuda:0', hand_pcl=False, full_state=None, envs=None, hand_model=None, space='euler', relative=True):\n \"\"\"\n is_likelihood_weighting = True, can potentially improve likelihood-estimation (e.g., for reward learning)\n \"\"\"\n hand_dof_batch, obj_pcl_batch = x\n if space == 'riemann':\n hand_dof_batch = action2grad(hand_dof_batch, relative=relative)\n batchsize = hand_dof_batch.shape[0]\n random_t = torch.rand(batchsize, device=device) * (1. - eps) + eps\n # random_t = torch.pow(10,-5*random_t) \n random_t = random_t.unsqueeze(-1)\n z = torch.randn_like(hand_dof_batch)\n mu, std = marginal_prob_fn(hand_dof_batch, random_t)\n perturbed_hand_dof_batch = mu + z * std\n\n if hand_pcl:\n if space == 'riemann':\n hand_dof = action2grad(perturbed_hand_dof_batch.clone(), relative=relative, inv=True)\n else:\n hand_dof = perturbed_hand_dof_batch.clone() \n hand_pos_2_w = full_state[:,18:21].clone().to(device).float()\n hand_quat_2_w = full_state[:,21:25].clone().to(device).float()\n hand_pos_2_h, hand_quat_2_h = envs.transform_target2source(hand_quat_2_w, hand_pos_2_w, hand_quat_2_w, hand_pos_2_w)\n\n ori_hand_dof = envs.dof_norm(hand_dof.clone(),inv=True)\n hand_pcl_2h = hand_model.get_hand_pcl(hand_pos=hand_pos_2_h, hand_quat=hand_quat_2_h, hand_dof=ori_hand_dof)\n obj_pcl_batch = torch.cat([obj_pcl_batch, hand_pcl_2h.reshape(hand_pcl_2h.size(0),hand_pcl_2h.size(2),hand_pcl_2h.size(1))],2)\n\n output = model((perturbed_hand_dof_batch.reshape(batchsize, -1, 1), obj_pcl_batch), random_t)\n\n total_loss = (output + z / std) ** 2\n if is_likelihood_weighting:\n _, diffusion_coeff = sde_fn(random_t)\n loss_weighting = diffusion_coeff ** 2\n node_l2 = torch.sum(total_loss, dim=-1) * loss_weighting\n else:\n loss_weighting = std ** 2\n node_l2 = torch.sum(total_loss * loss_weighting, dim=-1)\n loss_ = torch.mean(node_l2)\n return loss_" }, { "identifier": "cond_ode_sampler", "path": "Algorithms/SDE_update.py", "snippet": "def cond_ode_sampler(\n score_model,\n prior_fn,\n sde_fn,\n state,\n batch_size=64,\n atol=1e-5,\n rtol=1e-5,\n device='cuda',\n eps=1e-5,\n t0=1,\n num_steps=None,\n is_random=True,\n denoise=True, \n hand_pcl=False, \n full_state=None, \n envs=None, \n hand_model=None,\n space='euler',\n relative=True,\n):\n hand_dof_batch, obj_pcl_batch = state\n if space == 'riemann':\n hand_dof_batch = action2grad(hand_dof_batch, relative=relative)\n t0_ = torch.ones(batch_size, device=device)*t0\n\n if is_random:\n init_x = prior_fn(hand_dof_batch.shape).to(device) # normal distribution\n # init_x = torch.randn_like(hand_dof_batch, device=device) * marginal_prob_std(t0_)\n # init_x = -torch.ones_like(hand_dof_batch, device=device)\n # init_x = torch.tensor([ 0.0000, -0.7143, -1.0000, 0.0000, -0.7143, -1.0000, 0.0000, -0.7143,\n # -1.0000, -1.0000, 0.0000, -0.7143, -1.0000, 0.0000, -1.0000, 0.0000,\n # 0.0000, -1.0000,1,1,1,1,1,1,1], device=device).reshape(1,-1)[:,:hand_dof_batch.size(1)].expand_as(hand_dof_batch)\n else:\n batch_size = hand_dof_batch.size(0)\n init_x = hand_dof_batch\n \n # Create the latent code\n # init_x = torch.randn_like(hand_dof_batch, device=device) * marginal_prob_std(t0_)\n # !!! 
for dex hand only, set to same init state\n # init_x = hand_dof_batch\n shape = init_x.shape\n state_dim = shape[-1]\n\n def score_eval_wrapper(sample, time_steps):\n \"\"\"A wrapper of the score-based model for use by the ODE solver.\"\"\"\n with torch.no_grad():\n score = score_model(sample, time_steps)\n # return score.cpu().numpy().reshape((-1,))\n return score.cpu().numpy().reshape(-1)\n\n def ode_func(t, x):\n \"\"\"The ODE function for use by the ODE solver.\"\"\"\n x = torch.tensor(x.reshape(-1, state_dim)).to(device).float()\n time_steps = torch.ones(batch_size, device=device).unsqueeze(1) * t\n # if batch_size == 1:\n # time_steps = torch.ones(batch_size, device=device).unsqueeze(1) * t\n # else:\n # time_steps = torch.ones(batch_size, device=device) * t\n drift, diffusion = sde_fn(torch.tensor(t))\n drift = drift.cpu().numpy()\n diffusion = diffusion.cpu().numpy()\n if hand_pcl:\n hand_dof = x.clone() \n hand_pos_2_w = full_state[:,18:21].clone().to(device).float()\n hand_quat_2_w = full_state[:,21:25].clone().to(device).float()\n hand_pos_2_h, hand_quat_2_h = envs.transform_target2source(hand_quat_2_w, hand_pos_2_w, hand_quat_2_w, hand_pos_2_w)\n\n if space == 'riemann':\n hand_dof = action2grad(hand_dof.clone(), relative=relative, inv=True)\n else:\n hand_dof = perturbed_hand_dof_batch.clone() \n\n ori_hand_dof = envs.dof_norm(hand_dof.clone(),inv=True)\n hand_pcl_2h = hand_model.get_hand_pcl(hand_pos=hand_pos_2_h, hand_quat=hand_quat_2_h, hand_dof=ori_hand_dof)\n objhand_pcl_batch = torch.cat([obj_pcl_batch, hand_pcl_2h.reshape(hand_pcl_2h.size(0),hand_pcl_2h.size(2),hand_pcl_2h.size(1))],2)\n gradient = score_eval_wrapper((x, objhand_pcl_batch), time_steps)\n else:\n gradient = score_eval_wrapper((x, obj_pcl_batch), time_steps)\n # gradient[:6]*=100\n # gradient[6:30]*=10\n return drift - 0.5 * (diffusion**2) * gradient\n \n # Run the black-box ODE solver.\n t_eval = None\n if num_steps is not None:\n # num_steps, from t0 -> eps\n t_eval = np.linspace(t0, eps, num_steps)\n\n res = integrate.solve_ivp(ode_func, (t0, eps), init_x.reshape(-1).cpu().numpy(), rtol=rtol, atol=atol,\n method='RK45', t_eval=t_eval)\n # process, xs: [total_nodes*3, samples_num]\n # clamp for now TODO\n # xs = torch.clamp(torch.tensor(res.y, device=device).T, min=-1.0, max=1.0) \n xs = torch.tensor(res.y, device=device).T\n xs = xs.view(num_steps, hand_dof_batch.shape[0], -1)\n\n # result x: [total_nodes, 3]\n x = torch.clamp(torch.tensor(res.y[:, -1], device=device).reshape(shape), min=-1.0, max=1.0)\n # x = torch.tensor(res.y[:, -1], device=device).reshape(shape)\n\n # denoise, using the predictor step in P-C sampler\n if denoise:\n # Reverse diffusion predictor for denoising\n vec_eps = torch.ones((x.shape[0], 1), device=x.device) * eps\n drift, diffusion = sde_fn(vec_eps)\n grad = score_model((x.float(), obj_pcl_batch), vec_eps)\n drift = drift - diffusion ** 2 * grad # R-SDE\n mean_x = x + drift * ((1 - eps) / (1000 if num_steps is None else num_steps))\n x = mean_x\n \n if space=='riemann':\n xs = action2grad(xs, inv=True, relative=relative)\n x = action2grad(x, inv=True, relative=relative)\n \n return xs, x" }, { "identifier": "init_sde", "path": "Algorithms/SDE_update.py", "snippet": "def init_sde(sde_mode, min=0.1, max=10.0):\n # the SDE-related hyperparameters are copied from https://github.com/yang-song/score_sde_pytorch\n if sde_mode == 've':\n sigma_min = 0.01\n sigma_max = 90\n prior_fn = functools.partial(ve_prior, sigma_min=sigma_min, sigma_max=sigma_max)\n marginal_prob_fn = 
functools.partial(ve_marginal_prob, sigma_min=sigma_min, sigma_max=sigma_max)\n sde_fn = functools.partial(ve_sde, sigma_min=sigma_min, sigma_max=sigma_max)\n elif sde_mode == 'vp':\n beta_0 = min\n beta_1 = max\n print(beta_0, beta_1)\n prior_fn = functools.partial(vp_prior, beta_0=beta_0, beta_1=beta_1)\n marginal_prob_fn = functools.partial(vp_marginal_prob, beta_0=beta_0, beta_1=beta_1)\n sde_fn = functools.partial(vp_sde, beta_0=beta_0, beta_1=beta_1)\n elif sde_mode == 'subvp':\n beta_0 = 0.1\n beta_1 = 20\n prior_fn = functools.partial(subvp_prior, beta_0=beta_0, beta_1=beta_1)\n marginal_prob_fn = functools.partial(subvp_marginal_prob, beta_0=beta_0, beta_1=beta_1)\n sde_fn = functools.partial(subvp_sde, beta_0=beta_0, beta_1=beta_1)\n else:\n raise NotImplementedError\n return prior_fn, marginal_prob_fn, sde_fn" }, { "identifier": "ExponentialMovingAverage", "path": "Algorithms/SDE_update.py", "snippet": "class ExponentialMovingAverage:\n \"\"\"\n Maintains (exponential) moving average of a set of parameters.\n \"\"\"\n\n def __init__(self, parameters, decay, use_num_updates=True):\n \"\"\"\n Args:\n parameters: Iterable of `torch.nn.Parameter`; usually the result of\n `model.parameters()`.\n decay: The exponential decay.\n use_num_updates: Whether to use number of updates when computing\n averages.\n \"\"\"\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n self.decay = decay\n self.num_updates = 0 if use_num_updates else None\n self.shadow_params = [p.clone().detach()\n for p in parameters if p.requires_grad]\n self.collected_params = []\n\n def update(self, parameters):\n \"\"\"\n Update currently maintained parameters.\n\n Call this every time the parameters are updated, such as the result of\n the `optimizer.step()` call.\n\n Args:\n parameters: Iterable of `torch.nn.Parameter`; usually the same set of\n parameters used to initialize this object.\n \"\"\"\n decay = self.decay\n if self.num_updates is not None:\n self.num_updates += 1\n decay = min(decay, (1 + self.num_updates) / (10 + self.num_updates))\n one_minus_decay = 1.0 - decay\n with torch.no_grad():\n parameters = [p for p in parameters if p.requires_grad]\n for s_param, param in zip(self.shadow_params, parameters):\n s_param.sub_(one_minus_decay * (s_param - param)) # only update the ema-params\n\n def copy_to(self, parameters):\n \"\"\"\n Copy current parameters into given collection of parameters.\n\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored moving averages.\n \"\"\"\n parameters = [p for p in parameters if p.requires_grad]\n for s_param, param in zip(self.shadow_params, parameters):\n if param.requires_grad:\n param.data.copy_(s_param.data)\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. 
After validation (or model saving), use this to\n restore the former parameters.\n\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)\n\n def state_dict(self):\n return dict(decay=self.decay, num_updates=self.num_updates,\n shadow_params=self.shadow_params)\n\n def load_state_dict(self, state_dict):\n self.decay = state_dict['decay']\n self.num_updates = state_dict['num_updates']\n self.shadow_params = state_dict['shadow_params']" }, { "identifier": "CondScoreModel", "path": "Networks/SDENets_update.py", "snippet": "class CondScoreModel(nn.Module):\n def __init__(self, marginal_prob_func, hidden_dim, embed_dim, state_dim=1,\n mode='target', relative=False, pointnet_version='pt2', n_blocks=0, feature_dim_coff=1, space='euler'):\n super(CondScoreModel, self).__init__()\n self.marginal_prob_func = marginal_prob_func\n self.point_feat_dim = 1088\n hidden_dim = hidden_dim\n embed_dim = embed_dim\n self.embed_dim = embed_dim\n self.mode = mode\n self.pointnet_version = pointnet_version\n if relative:\n hand_state_dim = 18\n if space == 'riemann':\n hand_state_dim = 18+18\n else:\n hand_state_dim = 25\n if space == 'riemann':\n hand_state_dim = 25+18\n \n self.n_blocks = n_blocks\n self.hand_global_enc = nn.Sequential(\n nn.Linear(hand_state_dim, hidden_dim),\n nn.ReLU(False),\n nn.Linear(hidden_dim, hidden_dim),\n nn.ReLU(False),\n )\n # obj pcl feature encoder\n if pointnet_version == 'pt':\n self.obj_enc = PointNetEncoder(global_feat=True, feature_transform=False, channel=3) # for pointnet\n elif pointnet_version == 'pt2':\n self.obj_enc = Pointnet2Backbone(feature_dim_coff=feature_dim_coff) # for pointnet2\n # self.obj_enc = PointNetEncoder() # for pointnet2\n # self.obj_cat_embed = nn.Embedding(301,512)\n\n if self.n_blocks < 1:\n self.obj_global_enc = nn.Sequential(\n nn.Linear(1024, hidden_dim),\n nn.ReLU(),\n nn.Linear(hidden_dim, embed_dim),\n nn.ReLU(),\n )\n self.embed_sigma = nn.Sequential(GaussianFourierProjection(embed_dim=embed_dim),\n nn.Linear(embed_dim, embed_dim))\n\n if n_blocks < 1:\n self.init_enc = nn.Sequential(\n nn.Linear(state_dim, hidden_dim),\n nn.ReLU(),\n nn.Linear(hidden_dim, self.point_feat_dim),\n nn.ReLU(),\n )\n\n # cond_dim = hidden_dim*2 + embed_dim*2 # consider wall\n if self.mode == 'target':\n cond_dim = embed_dim\n \n # self.mhca = MHCA(num_heads=2, inp_dim=self.point_feat_dim, hid_dim=self.point_feat_dim)\n ''' main backbone '''\n # # mlp1\n self.mlp1_main = nn.Sequential(\n nn.Linear((hidden_dim + embed_dim*2), hidden_dim),\n nn.ReLU(),\n nn.Linear(hidden_dim, hidden_dim),\n )\n # # mlp2\n self.mlp2_main = nn.Sequential(\n nn.Linear(hidden_dim + embed_dim*2, hidden_dim),\n nn.ReLU(),\n nn.Linear(hidden_dim, hand_state_dim),\n )\n else:\n self.pre_dense_cond = nn.Linear(1024*feature_dim_coff, hidden_dim)\n self.pre_dense_t = nn.Linear(embed_dim, hidden_dim)\n # self.pre_gnorm = nn.GroupNorm(32, num_channels=hidden_dim)\n\n for idx in range(n_blocks):\n setattr(self, f'b{idx+1}_dense1', nn.Linear(hidden_dim, hidden_dim))\n setattr(self, f'b{idx+1}_dense1_t', nn.Linear(embed_dim, hidden_dim))\n setattr(self, f'b{idx+1}_dense1_cond', nn.Linear(hidden_dim, hidden_dim))\n # setattr(self, f'b{idx+1}_gnorm1', nn.GroupNorm(32, num_channels=hidden_dim))\n\n setattr(self, f'b{idx+1}_dense2', nn.Linear(hidden_dim, hidden_dim))\n setattr(self, f'b{idx+1}_dense2_t', nn.Linear(embed_dim, 
hidden_dim))\n setattr(self, f'b{idx+1}_dense2_cond', nn.Linear(hidden_dim, hidden_dim))\n # setattr(self, f'b{idx+1}_gnorm2', nn.GroupNorm(32, num_channels=hidden_dim))\n\n self.act = nn.ReLU(False)\n self.post_dense = nn.Linear(hidden_dim, hand_state_dim) \n\n def forward(self, batches, t, obj_feature=False):\n \"\"\"\n batches = hand_batch, obj_batch\n hand_batch: [bs, 25, 1]\n obj_batch: [bs, 3, 1024]\n t: [bs] !! not [bs, 1] !!\n \"\"\"\n hand_batch, obj_batch = batches\n batch_size = hand_batch.size(0)\n hand_dof = hand_batch.size(1)\n ''' get cond feat'''\n\n # sigma_feat: [num_nodes, embed_dim]\n sigma_feat = F.relu(self.embed_sigma(t.squeeze(-1)),inplace=False)\n\n # total_cond_feat: [num_nodes, hidden_dim*2+embed_dim*2]\n # obj_feat,_, _ = self.obj_enc(obj_batch.reshape(batch_size,-1,3)) # B x 1024\n\n ## no cuda pointnet2\n # obj_feat,_ = self.obj_enc(obj_batch) # B x 1024\n # obj_feat = self.obj_global_enc(obj_feat)\n if self.pointnet_version == 'pt':\n obj_feat,_,_ = self.obj_enc(obj_batch.reshape(batch_size,-1,3).permute(0,2,1)) # B x 1024\n elif self.pointnet_version == 'pt2':\n ## cuda pointnet2\n obj_feat,_ = self.obj_enc(obj_batch.reshape(batch_size,-1,3)) # B x 1024\n ## pointnet\n\n if obj_feature:\n obj_feat_fr = obj_feat.clone()\n\n if self.n_blocks < 1:\n ''' get init x feat '''\n hand_global_feat = self.hand_global_enc(hand_batch.reshape(batch_size,-1))\n obj_feat = self.obj_global_enc(obj_feat.reshape(batch_size,-1))\n\n # obj_feat = torch.arange(0,batch_size,device=hand_batch.device)\n # obj_feat = self.obj_cat_embed(obj_feat)\n if self.mode == 'target':\n total_cond_feat = torch.cat([sigma_feat, obj_feat], dim=-1) #\n # total_cond_feat = sigma_feat\n\n ''' main backbone of x '''\n x = torch.cat([hand_global_feat, total_cond_feat], -1)\n x = self.mlp1_main(x)\n x = torch.cat([x, total_cond_feat], -1)\n x = self.mlp2_main(x)\n else:\n obj_feat = obj_feat.reshape(batch_size,-1)\n obj_feat = self.pre_dense_cond(obj_feat)\n\n x = self.hand_global_enc(hand_batch.reshape(batch_size,-1))\n x = x + self.pre_dense_t(sigma_feat)\n x = x + obj_feat\n # x = self.pre_gnorm(x)\n x = self.act(x)\n \n for idx in range(self.n_blocks):\n x1 = getattr(self, f'b{idx+1}_dense1')(x)\n x1 = x1 + getattr(self, f'b{idx+1}_dense1_t')(sigma_feat)\n x1 = x1 + getattr(self, f'b{idx+1}_dense1_cond')(obj_feat)\n # x1 = getattr(self, f'b{idx+1}_gnorm1')(x1)\n x1 = self.act(x1)\n # dropout, maybe\n # x1 = self.dropout(x1)\n\n x2 = getattr(self, f'b{idx+1}_dense2')(x1)\n x2 = x2 + getattr(self, f'b{idx+1}_dense2_t')(sigma_feat)\n x2 = x2 + getattr(self, f'b{idx+1}_dense2_cond')(obj_feat)\n # x2 = getattr(self, f'b{idx+1}_gnorm2')(x2)\n x2 = self.act(x2)\n # dropout, maybe\n # x2 = self.dropout(x2)\n\n x = x + x2\n\n x = self.post_dense(x)\n # normalize the output\n \n _, std = self.marginal_prob_func(x, t) \n x = x / (std + 1e-7)\n if obj_feature:\n return x, obj_feat_fr\n else:\n return x" }, { "identifier": "exists_or_mkdir", "path": "utils/utils.py", "snippet": "def exists_or_mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)\n return False\n else:\n return True" }, { "identifier": "save_video", "path": "utils/utils.py", "snippet": "def save_video(env, states, save_path, simulation=False, fps = 50, render_size = 256, suffix='avi'):\n # states: [state, ....]\n # state: (60, )\n imgs = []\n for _, state in tqdm(enumerate(states), desc='Saving video'):\n # set_trace()\n env_id = state[-1].long()\n env.set_states(state.unsqueeze(0))\n img = 
env.render(rgb=True,img_size=render_size)[env_id]\n imgs.append(img.cpu().numpy())\n if suffix == 'gif':\n from PIL import Image\n images_to_gif(save_path+f'.{suffix}', [Image.fromarray(img[:, :, ::-1], mode='RGB') for img in imgs], fps=len(imgs)//5)\n else:\n batch_imgs = np.stack(imgs, axis=0)\n images_to_video(save_path+f'.{suffix}', batch_imgs, fps, (render_size, render_size))" }, { "identifier": "get_dict_key", "path": "utils/utils.py", "snippet": "def get_dict_key(dic, value):\n key = list(dic.keys())[list(dic.values()).index(value)]\n return key" }, { "identifier": "DexDataset", "path": "utils/utils.py", "snippet": "class DexDataset(Dataset):\n def __init__(self, dataset):\n self.dataset = dataset\n self.data_ot_idx = {}\n # set_trace()\n self.data_dim = self.dataset.shape[1]\n self.data_ot = {}\n obj_id = 0\n\n for (idx,data) in enumerate(self.dataset):\n # set_trace()\n data_id = data[3104]\n # print(data_id)\n if data_id in self.data_ot_idx:\n self.data_ot_idx[data_id].append(idx)\n else:\n self.data_ot_idx[data_id] = [idx]\n self.data_ot[obj_id] = data_id\n obj_id+=1\n \n # set_trace()\n self.data_grasp_num = np.zeros(len(self.data_ot_idx))\n for (i,data_ot_idx_each) in enumerate(self.data_ot_idx):\n # set_trace()\n self.data_grasp_num[i] = len(self.data_ot_idx[data_ot_idx_each])\n \n print('data initilized!')\n\n # need to overload\n def __len__(self):\n return len(self.data_ot_idx)\n\n # need to overload\n def __getitem__(self, idx):\n # sampled_data = np.zeros(len(idx),self.data_dim)\n # set_trace()\n sampled_idx = np.random.randint(0, self.data_grasp_num[idx])\n # print(idx,sampled_idx)\n sampled_data = self.dataset[self.data_ot_idx[self.data_ot[idx]][sampled_idx]]\n # set_trace()\n return sampled_data" } ]
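The ExponentialMovingAverage class quoted above exposes update/store/copy_to/restore; the usual pattern is to update the shadow weights after every optimizer step and swap them in only for evaluation. A minimal sketch with a toy model (the linear layer, decay, and loss are assumptions; the EMA calls follow the quoted signatures):

import torch
import torch.nn as nn
from Algorithms.SDE_update import ExponentialMovingAverage

model = nn.Linear(4, 4)
optimizer = torch.optim.Adam(model.parameters(), lr=2e-4)
ema = ExponentialMovingAverage(model.parameters(), decay=0.999)

for step in range(100):
    loss = model(torch.randn(8, 4)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    ema.update(model.parameters())     # track the shadow (EMA) weights

ema.store(model.parameters())          # stash the raw training weights
ema.copy_to(model.parameters())        # evaluate with the smoothed weights
with torch.no_grad():
    eval_out = model(torch.randn(8, 4))
ema.restore(model.parameters())        # back to the raw weights for further training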
import isaacgym import condexenvs import argparse import functools import sys import os import cv2 import numpy as np import tqdm import time import pickle import random import torch import torch.optim as optim from ipdb import set_trace from tensorboardX import SummaryWriter from torch.utils.data import DataLoader from torchvision.utils import make_grid from Algorithms.SDE_update import loss_fn_cond, cond_ode_sampler, init_sde, ExponentialMovingAverage from Networks.SDENets_update import CondScoreModel from utils.utils import exists_or_mkdir, save_video, get_dict_key, DexDataset
8,098
# mode parser.add_argument('--model_name', type=str, default='fine', metavar='NAME', help="the name of the model (default: fine") parser.add_argument('--quick', action='store_true', help="test on small cases") parser.add_argument('--train_model', action='store_true', help="train model") parser.add_argument('--con', action='store_true', help="continue train the given model") parser.add_argument('--demo_gen', action='store_true', help="demo gen mode") parser.add_argument('--demo_nums', type=int, default=8, help='total demo nums') parser.add_argument('--demo_name', type=str, default='small_test', help='demo names') parser.add_argument('--space', type=str, default='riemann', help='angle space') parser.add_argument('--eval_demo_name', type=str, default='small_test', help='demo names') parser.add_argument('--constrained', action='store_false', help="whether constrain base") parser.add_argument('--gt', action='store_true', help="gt mode") parser.add_argument('--device_id', type=int, default=0, help='device_id') # tensorboard parser.add_argument("--log_dir", type=str, default='gf_overfit') parser.add_argument("--pt_version", type=str, default='pt2') args = parser.parse_args() device = f'cuda:{args.device_id}' ''' make env ''' num_envs = args.num_envs # 53 envs = condexenvs.make( seed=args.seed, task="ShadowHandCon", num_envs=num_envs, sim_device=device, rl_device=device, graphics_device_id = args.device_id, virtual_screen_capture=False, headless=args.gui, force_render = False, mode = args.mode, num_run_envs = args.num_run_envs, method = args.method, dataset_type = args.dataset_type, ) envs.reset(env_init=True) print(args) # set_trace() ''' seed ''' np.random.seed(args.seed) torch.manual_seed(args.seed) torch.set_num_threads(4) random.seed(args.seed) ''' logging ''' exists_or_mkdir('./logs') ckpt_path = f'./logs/{args.log_dir}/' exists_or_mkdir(ckpt_path) tb_path = f'./logs/{args.log_dir}/tb' exists_or_mkdir(tb_path) writer = SummaryWriter(tb_path) ''' create train dataset and dataloader ''' dataset_path = f'./ExpertDatasets/grasp_data/ground/{args.demo_name}.pth' assert os.path.exists(dataset_path), 'Dataset not found!' with open(dataset_path, 'rb') as f: data_samples = pickle.load(f) print(len(data_samples)) ''' eval ''' eval_dataset_path = f'./ExpertDatasets/grasp_data/ground/{args.eval_demo_name}_rc.pth' assert os.path.exists(eval_dataset_path), 'Eval Dataset not found!' with open(eval_dataset_path, 'rb') as f: eval_data_samples = pickle.load(f) eval_dataset_ot_path = f'./ExpertDatasets/grasp_data/ground/{args.eval_demo_name}_rc_ot.pth' assert os.path.exists(eval_dataset_ot_path), 'Eval Dataset oti not found!' 
with open(eval_dataset_ot_path, 'rb') as f: eval_data_ot = pickle.load(f) # change data object type id eval_dataset_oti_path = f'./ExpertDatasets/grasp_data/ground/{args.eval_demo_name}_oti.pth' with open(eval_dataset_oti_path, 'rb') as f: eval_data_oti = pickle.load(f) for (i, data) in enumerate(eval_data_samples): env_id_in_full = int(data[25+points_per_object*3+7:25+points_per_object*3+8]) object_type = get_dict_key(eval_data_oti, env_id_in_full) env_id_in_current = envs.obj_type_id[object_type] eval_data_samples[i,3104] = env_id_in_current eval_dataset = torch.tensor(eval_data_samples, device=device) eval_dataset = eval_dataset.reshape(-1, eval_dataset.shape[-1]) args.num_envs = len(eval_data_ot) num_envs = len(eval_data_ot) test_per_object = int(len(eval_dataset)/num_envs) eval_demo_number = len(eval_data_samples) total_data_number = len(data_samples) # augment demos if total_data_number < args.demo_nums: new_data_samples = data_samples for i in range(args.demo_nums - total_data_number): new_data_samples = np.vstack((new_data_samples,data_samples[i%total_data_number])) dataset = new_data_samples else: dataset = data_samples dataset = dataset[: args.demo_nums] dataset = dataset.reshape(-1, dataset.shape[-1]) # set_trace() if 'all' in dataset_path: print('balance data') dataset = DexDataset(dataset) print(len(dataset)) if args.relative: dataloader = DataLoader(dataset=dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, drop_last=True) else: dataloader = DataLoader(dataset=dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, drop_last=True) ''' SDE ''' # init SDE config
#!/usr/bin/env python sys.path.append(os.path.dirname(os.path.dirname(__file__))) points_per_object = 1024 vis_image = False max_bz = 256 if __name__ == "__main__": parser = argparse.ArgumentParser() ''' configurator ''' # score matching parameter parser.add_argument('--test_decay', type=str, default='False') parser.add_argument('--score_mode', type=str, default='target') parser.add_argument('--sde_mode', type=str, default='vp') # ['ve', 'vp', 'subvp'] parser.add_argument('--sde_min', type=float, default=0.1) parser.add_argument('--sde_max', type=float, default=10.0) parser.add_argument('--n_epoches', type=int, default=50000) parser.add_argument('--eval_freq', type=int, default=1000) parser.add_argument('--eval_times', type=int, default=5) parser.add_argument('--batch_size', type=int, default=64) parser.add_argument('--lr', type=float, default=2e-4) parser.add_argument('--t0', type=float, default=0.5) parser.add_argument('--ema_rate', type=float, default=0.999) parser.add_argument('--repeat_num', type=int, default=1) parser.add_argument('--warmup', type=int, default=100) parser.add_argument('--grad_clip', type=float, default=1.) parser.add_argument('--beta1', type=float, default=0.9) parser.add_argument('--test_ratio', type=float, default=0.1) parser.add_argument('--base_noise_scale', type=float, default=0.01) parser.add_argument('--workers', type=int, default=4) parser.add_argument('--hidden_dim', type=int, default=1024) parser.add_argument('--embed_dim', type=int, default=512) parser.add_argument('--seed', type=int, default=0) parser.add_argument('--relative', action='store_true', help="relative obj pcl state") # env parser.add_argument('--num_envs', type=int, default=4, help='total env nums') parser.add_argument('--num_run_envs', type=int, default=1, help='running env nums') parser.add_argument('--max_episode_steps', type=int, default=200, help='step numbers for each episode') parser.add_argument("--method", type=str, default='filter') parser.add_argument('--gui', action='store_false', help="enable gui") parser.add_argument("--mode", type=str, default='eval') parser.add_argument("--dataset_type", type=str, default='train') # mode parser.add_argument('--model_name', type=str, default='fine', metavar='NAME', help="the name of the model (default: fine") parser.add_argument('--quick', action='store_true', help="test on small cases") parser.add_argument('--train_model', action='store_true', help="train model") parser.add_argument('--con', action='store_true', help="continue train the given model") parser.add_argument('--demo_gen', action='store_true', help="demo gen mode") parser.add_argument('--demo_nums', type=int, default=8, help='total demo nums') parser.add_argument('--demo_name', type=str, default='small_test', help='demo names') parser.add_argument('--space', type=str, default='riemann', help='angle space') parser.add_argument('--eval_demo_name', type=str, default='small_test', help='demo names') parser.add_argument('--constrained', action='store_false', help="whether constrain base") parser.add_argument('--gt', action='store_true', help="gt mode") parser.add_argument('--device_id', type=int, default=0, help='device_id') # tensorboard parser.add_argument("--log_dir", type=str, default='gf_overfit') parser.add_argument("--pt_version", type=str, default='pt2') args = parser.parse_args() device = f'cuda:{args.device_id}' ''' make env ''' num_envs = args.num_envs # 53 envs = condexenvs.make( seed=args.seed, task="ShadowHandCon", num_envs=num_envs, sim_device=device, rl_device=device, 
graphics_device_id = args.device_id, virtual_screen_capture=False, headless=args.gui, force_render = False, mode = args.mode, num_run_envs = args.num_run_envs, method = args.method, dataset_type = args.dataset_type, ) envs.reset(env_init=True) print(args) # set_trace() ''' seed ''' np.random.seed(args.seed) torch.manual_seed(args.seed) torch.set_num_threads(4) random.seed(args.seed) ''' logging ''' exists_or_mkdir('./logs') ckpt_path = f'./logs/{args.log_dir}/' exists_or_mkdir(ckpt_path) tb_path = f'./logs/{args.log_dir}/tb' exists_or_mkdir(tb_path) writer = SummaryWriter(tb_path) ''' create train dataset and dataloader ''' dataset_path = f'./ExpertDatasets/grasp_data/ground/{args.demo_name}.pth' assert os.path.exists(dataset_path), 'Dataset not found!' with open(dataset_path, 'rb') as f: data_samples = pickle.load(f) print(len(data_samples)) ''' eval ''' eval_dataset_path = f'./ExpertDatasets/grasp_data/ground/{args.eval_demo_name}_rc.pth' assert os.path.exists(eval_dataset_path), 'Eval Dataset not found!' with open(eval_dataset_path, 'rb') as f: eval_data_samples = pickle.load(f) eval_dataset_ot_path = f'./ExpertDatasets/grasp_data/ground/{args.eval_demo_name}_rc_ot.pth' assert os.path.exists(eval_dataset_ot_path), 'Eval Dataset oti not found!' with open(eval_dataset_ot_path, 'rb') as f: eval_data_ot = pickle.load(f) # change data object type id eval_dataset_oti_path = f'./ExpertDatasets/grasp_data/ground/{args.eval_demo_name}_oti.pth' with open(eval_dataset_oti_path, 'rb') as f: eval_data_oti = pickle.load(f) for (i, data) in enumerate(eval_data_samples): env_id_in_full = int(data[25+points_per_object*3+7:25+points_per_object*3+8]) object_type = get_dict_key(eval_data_oti, env_id_in_full) env_id_in_current = envs.obj_type_id[object_type] eval_data_samples[i,3104] = env_id_in_current eval_dataset = torch.tensor(eval_data_samples, device=device) eval_dataset = eval_dataset.reshape(-1, eval_dataset.shape[-1]) args.num_envs = len(eval_data_ot) num_envs = len(eval_data_ot) test_per_object = int(len(eval_dataset)/num_envs) eval_demo_number = len(eval_data_samples) total_data_number = len(data_samples) # augment demos if total_data_number < args.demo_nums: new_data_samples = data_samples for i in range(args.demo_nums - total_data_number): new_data_samples = np.vstack((new_data_samples,data_samples[i%total_data_number])) dataset = new_data_samples else: dataset = data_samples dataset = dataset[: args.demo_nums] dataset = dataset.reshape(-1, dataset.shape[-1]) # set_trace() if 'all' in dataset_path: print('balance data') dataset = DexDataset(dataset) print(len(dataset)) if args.relative: dataloader = DataLoader(dataset=dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, drop_last=True) else: dataloader = DataLoader(dataset=dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, drop_last=True) ''' SDE ''' # init SDE config
prior_fn, marginal_prob_fn, sde_fn = init_sde(args.sde_mode, min=args.sde_min, max=args.sde_max)
2
2023-11-09 06:08:40+00:00
12k
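A note on the padding step in the training script above: when the loaded demonstration set is smaller than --demo_nums, the script cycles through the existing samples with np.vstack until the target count is reached. A minimal, standalone numpy sketch of that step (function and variable names here are illustrative, not taken from the repo):

import numpy as np

def pad_demos(data_samples: np.ndarray, demo_nums: int) -> np.ndarray:
    # Repeat existing demos cyclically until at least demo_nums rows exist,
    # then truncate to exactly demo_nums, mirroring the loop in the script above.
    padded = data_samples
    total = len(data_samples)
    if total < demo_nums:
        for i in range(demo_nums - total):
            padded = np.vstack((padded, data_samples[i % total]))
    return padded[:demo_nums]

# Example: pad_demos(np.random.rand(3, 5), demo_nums=8).shape -> (8, 5)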
ApolloAuto/apollo-model-centerpoint
paddle3d/transforms/reader.py
[ { "identifier": "manager", "path": "paddle3d/apis/manager.py", "snippet": "class ComponentManager:\n def __init__(self, *, name: str, description: str = ''):\n def __len__(self):\n def __repr__(self):\n def __getitem__(self, item: str):\n def components_dict(self) -> dict:\n def name(self) -> str:\n def description(self) -> str:\n def _add_single_component(self, component: Callable):\n def add_component(self, components: Union[Callable, Iterable[Callable]]\n ) -> Union[Callable, Iterable[Callable]]:\nVOXEL_ENCODERS = ComponentManager(name=\"voxel_encoders\")\nMIDDLE_ENCODERS = ComponentManager(name=\"middle_encoders\")\nBACKBONES = ComponentManager(name=\"backbones\")\nMODELS = ComponentManager(name=\"models\")\nNECKS = ComponentManager(name=\"necks\")\nHEADS = ComponentManager(name=\"heads\")\nLOSSES = ComponentManager(name=\"losses\")\nDATASETS = ComponentManager(name=\"datasets\")\nTRANSFORMS = ComponentManager(name=\"transforms\")\nLR_SCHEDULERS = ComponentManager(name=\"lr_schedulers\")\nOPTIMIZERS = ComponentManager(name=\"optimizers\")\nVOXELIZERS = ComponentManager(name=\"voxelizers\")\nPOINT_ENCODERS = ComponentManager(name=\"point_encoders\")\nPOSITIONAL_ENCODING = ComponentManager(name=\"POSITIONAL_ENCODING\")\nTRANSFORMERS = ComponentManager(name=\"TRANSFORMERS\")\nTRANSFORMER_ENCODERS = ComponentManager(name=\"TRANSFORMER_ENCODERS\")\nTRANSFORMER_ENCODER_LAYERS = ComponentManager(name=\"TRANSFORMER_ENCODER_LAYERS\")\nATTENTIONS = ComponentManager(name=\"ATTENTIONS\")\nBBOX_CODERS = ComponentManager(name=\"BBOX_CODERS\")\nBBOX_ASSIGNERS = ComponentManager(name=\"BBOX_ASSIGNERS\")\nMATCH_COSTS = ComponentManager(name=\"MATCH_COSTS\")\nBBOX_SAMPLERS = ComponentManager(name=\"BBOX_SAMPLERS\")\nTRANSFORMER_DECODER_LAYERS = ComponentManager(name=\"TRANSFORMER_DECODER_LAYERS\")\nTRANSFORMER_DECODERS = ComponentManager(name=\"TRANSFORMER_DECODERS\")" }, { "identifier": "kitti_utils", "path": "paddle3d/datasets/kitti/kitti_utils.py", "snippet": "def camera_record_to_object(\n kitti_records: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\ndef lidar_record_to_object(\n kitti_records: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\ndef project_camera_to_velodyne(kitti_records: np.ndarray,\n calibration_info: Tuple[np.ndarray]):\ndef box_lidar_to_camera(bboxes_3d: BBoxes3D,\n calibration_info: Tuple[np.ndarray]):\ndef coord_camera_to_velodyne(points: np.ndarray,\n calibration_info: Tuple[np.ndarray]):\ndef coord_velodyne_to_camera(points: np.ndarray, calibration_info: np.ndarray):\ndef project_velodyne_to_camera(pointcloud: np.ndarray,\n calibration_info: np.ndarray, image_shape):\ndef assess_object_difficulties(kitti_records: np.ndarray,\n min_height_thresh: List = [40, 25, 25],\n max_occlusion_thresh: List = [0, 1, 2],\n max_truncation_thresh: List = [0.15, 0.3, 0.5]):\ndef projection_matrix_decomposition(proj):\ndef filter_fake_result(detection: Sample):\ndef get_objects_from_label(label_file):\ndef cls_type_to_id(cls_type):\n def __init__(self, line):\n def get_kitti_obj_level(self):\n def generate_corners3d(self):\n def to_str(self):\n def to_kitti_format(self):\n def __init__(self, calib_dict):\n def cart_to_hom(self, pts):\n def rect_to_lidar(self, pts_rect):\n def lidar_to_rect(self, pts_lidar):\n def rect_to_img(self, pts_rect):\n def lidar_to_img(self, pts_lidar):\n def img_to_rect(self, u, v, depth_rect):\n def corners3d_to_img_boxes(self, corners3d):\n V2C = np.eye(4)\n V2C[:3, :4] = calibration_info[5]\n V2C = np.eye(4)\n V2C[:3, :4] = 
calibration_info[5]\n V2C = np.eye(4)\n V2C[:3, :4] = calibration_info[5]\n P2 = np.eye(4)\n P2[:3, :4] = calibration_info[2]\n CR = proj[0:3, 0:3]\n CT = proj[0:3, 3]\n C = np.linalg.inv(Cinv)\n T = Cinv @ CT\n R = np.array([[np.cos(self.ry), 0, np.sin(self.ry)], [0, 1, 0],\n [-np.sin(self.ry), 0,\n np.cos(self.ry)]])\nclass Object3d(object):\nclass Calibration(object):" }, { "identifier": "SemanticKITTIDataset", "path": "paddle3d/datasets/semantic_kitti/semantic_kitti.py", "snippet": "class SemanticKITTIDataset(BaseDataset):\n \"\"\"\n SemanticKITTI dataset.\n\n Class attributes (`LABELS`, `LEARNING_MAP`, `LEARNING_MAP_INV`, `CONTENT`,\n `LEARNING_IGNORE`, `SEQUENCE_SPLITS`) are from SemanticKITTI dataset official\n configuration. Please refer to:\n <https://github.com/PRBonn/semantic-kitti-api/blob/master/config/semantic-kitti-all.yaml>.\n\n Args:\n dataset_root (str): Path to the root directory of SemanticKITTI dataset.\n mode (str, optional): The mode of dataset. Default is 'train'.\n sequences (list or tuple, optional): The data sequences of dataset.\n If None, use default sequence splits according to `mode`. Default is None.\n transforms (TransformABC or list[TransformABC], optional): The transforms of dataset. Default is None.\n \"\"\"\n\n LABELS = {\n 0: \"unlabeled\",\n 1: \"outlier\",\n 10: \"car\",\n 11: \"bicycle\",\n 13: \"bus\",\n 15: \"motorcycle\",\n 16: \"on-rails\",\n 18: \"truck\",\n 20: \"other-vehicle\",\n 30: \"person\",\n 31: \"bicyclist\",\n 32: \"motorcyclist\",\n 40: \"road\",\n 44: \"parking\",\n 48: \"sidewalk\",\n 49: \"other-ground\",\n 50: \"building\",\n 51: \"fence\",\n 52: \"other-structure\",\n 60: \"lane-marking\",\n 70: \"vegetation\",\n 71: \"trunk\",\n 72: \"terrain\",\n 80: \"pole\",\n 81: \"traffic-sign\",\n 99: \"other-object\",\n 252: \"moving-car\",\n 253: \"moving-bicyclist\",\n 254: \"moving-person\",\n 255: \"moving-motorcyclist\",\n 256: \"moving-on-rails\",\n 257: \"moving-bus\",\n 258: \"moving-truck\",\n 259: \"moving-other-vehicle\"\n }\n\n LEARNING_MAP = {\n 0: 0, # \"unlabeled\"\n 1: 0, # \"outlier\" mapped to \"unlabeled\" ------------------------mapped\n 10: 1, # \"car\"\n 11: 2, # \"bicycle\"\n 13: 5, # \"bus\" mapped to \"other-vehicle\" ------------------------mapped\n 15: 3, # \"motorcycle\"\n 16: 5, # \"on-rails\" mapped to \"other-vehicle\" -------------------mapped\n 18: 4, # \"truck\"\n 20: 5, # \"other-vehicle\"\n 30: 6, # \"person\"\n 31: 7, # \"bicyclist\"\n 32: 8, # \"motorcyclist\"\n 40: 9, # \"road\"\n 44: 10, # \"parking\"\n 48: 11, # \"sidewalk\"\n 49: 12, # \"other-ground\"\n 50: 13, # \"building\"\n 51: 14, # \"fence\"\n 52: 0, # \"other-structure\" mapped to \"unlabeled\" ----------------mapped\n 60: 9, # \"lane-marking\" to \"road\" -------------------------------mapped\n 70: 15, # \"vegetation\"\n 71: 16, # \"trunk\"\n 72: 17, # \"terrain\"\n 80: 18, # \"pole\"\n 81: 19, # \"traffic-sign\"\n 99: 0, # \"other-object\" to \"unlabeled\" --------------------------mapped\n 252:\n 1, # \"moving-car\" to \"car\" ----------------------------------mapped\n 253:\n 7, # \"moving-bicyclist\" to \"bicyclist\" ----------------------mapped\n 254:\n 6, # \"moving-person\" to \"person\" ----------------------------mapped\n 255:\n 8, # \"moving-motorcyclist\" to \"motorcyclist\" ----------------mapped\n 256:\n 5, # \"moving-on-rails\" mapped to \"other-vehicle\" ------------mapped\n 257:\n 5, # \"moving-bus\" mapped to \"other-vehicle\" -----------------mapped\n 258:\n 4, # \"moving-truck\" to \"truck\" 
------------------------------mapped\n 259:\n 5, # \"moving-other\"-vehicle to \"other-vehicle\" --------------mapped\n }\n\n LEARNING_MAP_INV = { # inverse of previous map\n 0: 0, # \"unlabeled\", and others ignored\n 1: 10, # \"car\"\n 2: 11, # \"bicycle\"\n 3: 15, # \"motorcycle\"\n 4: 18, # \"truck\"\n 5: 20, # \"other-vehicle\"\n 6: 30, # \"person\"\n 7: 31, # \"bicyclist\"\n 8: 32, # \"motorcyclist\"\n 9: 40, # \"road\"\n 10: 44, # \"parking\"\n 11: 48, # \"sidewalk\"\n 12: 49, # \"other-ground\"\n 13: 50, # \"building\"\n 14: 51, # \"fence\"\n 15: 70, # \"vegetation\"\n 16: 71, # \"trunk\"\n 17: 72, # \"terrain\"\n 18: 80, # \"pole\"\n 19: 81, # \"traffic-sign\"\n }\n\n CONTENT = { # as a ratio with the total number of points\n 0: 0.018889854628292943,\n 1: 0.0002937197336781505,\n 10: 0.040818519255974316,\n 11: 0.00016609538710764618,\n 13: 2.7879693665067774e-05,\n 15: 0.00039838616015114444,\n 16: 0.0,\n 18: 0.0020633612104619787,\n 20: 0.0016218197275284021,\n 30: 0.00017698551338515307,\n 31: 1.1065903904919655e-08,\n 32: 5.532951952459828e-09,\n 40: 0.1987493871255525,\n 44: 0.014717169549888214,\n 48: 0.14392298360372,\n 49: 0.0039048553037472045,\n 50: 0.1326861944777486,\n 51: 0.0723592229456223,\n 52: 0.002395131480328884,\n 60: 4.7084144280367186e-05,\n 70: 0.26681502148037506,\n 71: 0.006035012012626033,\n 72: 0.07814222006271769,\n 80: 0.002855498193863172,\n 81: 0.0006155958086189918,\n 99: 0.009923127583046915,\n 252: 0.001789309418528068,\n 253: 0.00012709999297008662,\n 254: 0.00016059776092534436,\n 255: 3.745553104802113e-05,\n 256: 0.0,\n 257: 0.00011351574470342043,\n 258: 0.00010157861367183268,\n 259: 4.3840131989471124e-05,\n }\n\n LEARNING_IGNORE = {\n 0: True, # \"unlabeled\", and others ignored\n 1: False, # \"car\"\n 2: False, # \"bicycle\"\n 3: False, # \"motorcycle\"\n 4: False, # \"truck\"\n 5: False, # \"other-vehicle\"\n 6: False, # \"person\"\n 7: False, # \"bicyclist\"\n 8: False, # \"motorcyclist\"\n 9: False, # \"road\"\n 10: False, # \"parking\"\n 11: False, # \"sidewalk\"\n 12: False, # \"other-ground\"\n 13: False, # \"building\"\n 14: False, # \"fence\"\n 15: False, # \"vegetation\"\n 16: False, # \"trunk\"\n 17: False, # \"terrain\"\n 18: False, # \"pole\"\n 19: False, # \"traffic-sign\"\n }\n\n SEQUENCE_SPLITS = {\n 'train': (0, 1, 2, 3, 4, 5, 6, 7, 9, 10),\n 'val': (8, ),\n 'test': (11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21)\n }\n\n def __init__(self,\n dataset_root: str,\n mode: str = \"train\",\n sequences: Union[List[int], Tuple[int], None] = None,\n transforms: Union[TransformABC, List[TransformABC]] = None):\n super().__init__()\n self.mode = mode\n\n if isinstance(transforms, list):\n transforms = T.Compose(transforms)\n\n self.transforms = transforms\n\n if self.mode not in ['train', 'val', 'trainval', 'test']:\n raise ValueError(\n \"mode should be 'train', 'val', 'trainval' or 'test', but got {}.\"\n .format(self.mode))\n\n if sequences is not None:\n self.sequences = sequences\n else:\n self.sequences = self.SEQUENCE_SPLITS[self.mode]\n\n # get file list\n self.data = []\n for seq in self.sequences:\n seq_dir = os.path.join(dataset_root, 'sequences', '{0:02d}'.format(\n int(seq)))\n scans = sorted(glob(os.path.join(seq_dir, 'velodyne', '*.bin')))\n self.data.extend(scans)\n\n def __len__(self):\n return len(self.data)\n\n @staticmethod\n def build_remap_lut():\n \"\"\"\n Make lookup table for mapping\n \"\"\"\n\n maxkey = max(SemanticKITTIDataset.LEARNING_MAP.keys())\n\n # +100 hack making lut bigger just in case there are 
unknown labels\n remap_lut = np.zeros((maxkey + 100), dtype=np.int32)\n remap_lut[list(SemanticKITTIDataset.LEARNING_MAP.keys())] = list(\n SemanticKITTIDataset.LEARNING_MAP.values())\n\n return remap_lut\n\n @property\n def name(self) -> str:\n return \"SemanticKITTI\"\n\n @property\n def labels(self) -> List[str]:\n num_classes = len(self.LEARNING_MAP_INV)\n class_names = [\n self.LABELS[self.LEARNING_MAP_INV[i]] for i in range(num_classes)\n ]\n return class_names" }, { "identifier": "PointCloud", "path": "paddle3d/geometries/pointcloud.py", "snippet": "class PointCloud(_Structure):\n def __init__(self, data: np.ndarray):\n if not isinstance(data, np.ndarray):\n data = np.array(data)\n\n if data.ndim != 2 and data.ndim != 3:\n # When the data expands in 8 directions, the data.ndim is 3\n # [-1, 3] --> [-1, 8, 3]\n # 7 -------- 4\n # /| /|\n # 6 -------- 5 .\n # | | | |\n # . 3 -------- 0\n # |/ |/\n # 2 -------- 1\n raise ValueError(\n 'Illegal PointCloud data with number of dim {}'.format(\n data.ndim))\n\n if data.shape[-1] < 3:\n raise ValueError('Illegal PointCloud data with shape {}'.format(\n data.shape))\n\n def scale(self, factor: float):\n \"\"\"\n \"\"\"\n self[..., :3] = self[..., :3] * factor\n\n def translate(self, translation: np.ndarray):\n \"\"\"\n \"\"\"\n self[..., :3] = self[..., :3] + translation\n\n def rotate_around_z(self, angle: np.ndarray):\n \"\"\"\n \"\"\"\n # Rotation matrix around the z-axis\n rot_sin = np.sin(angle)\n rot_cos = np.cos(angle)\n if self.ndim == 2:\n rotation_matrix = np.array(\n [[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]],\n dtype=self.dtype)\n elif self.ndim == 3:\n zeros = np.zeros(self.shape[0])\n ones = np.ones(self.shape[0])\n rotation_matrix = np.array(\n [[rot_cos, -rot_sin, zeros], [rot_sin, rot_cos, zeros],\n [zeros, zeros, ones]],\n dtype=self.dtype)\n rotation_matrix = rotation_matrix.reshape([-1, 3, 3])\n\n # Rotate x,y,z\n self[..., :3] = self[..., :3] @ rotation_matrix\n\n def flip(self, axis: int):\n \"\"\"\n \"\"\"\n if axis not in [0, 1]:\n raise ValueError(\n \"Flip axis should be 0 or 1, but recieved is {}\".format(axis))\n if axis == 0: # flip along x-axis\n self[:, 1] = -self[:, 1]\n elif axis == 1: # flip along y-axis\n self[:, 0] = -self[:, 0]\n\n def shuffle(self):\n self[...] = np.random.permutation(\n self[...]) # permutation is fater than shuffle\n\n def get_mask_of_points_outside_range(self, limit_range):\n mask = (self[:, 0] >= limit_range[0]) & (self[:, 0] <= limit_range[3]) \\\n & (self[:, 1] >= limit_range[1]) & (self[:, 1] <= limit_range[4])\n return mask" }, { "identifier": "points_in_convex_polygon_3d_jit", "path": "paddle3d/geometries/bbox.py", "snippet": "def points_in_convex_polygon_3d_jit(points, polygon_surfaces,\n num_surfaces=None):\n \"\"\"\n Check points is in 3d convex polygons.\n\n Args:\n points: [num_points, 3] array.\n polygon_surfaces: [num_polygon, max_num_surfaces,\n max_num_points_of_surface, 3]\n array. all surfaces' normal vector must direct to internal.\n max_num_points_of_surface must at least 3.\n num_surfaces: [num_polygon] array. 
indicate how many surfaces\n a polygon contain\n Returns:\n [num_points, num_polygon] bool array.\n \"\"\"\n\n num_polygons = polygon_surfaces.shape[0]\n if num_surfaces is None:\n num_surfaces = np.full((num_polygons, ), 9999999, dtype=np.int64)\n normal_vec, d = surface_equ_3d_jit(polygon_surfaces[:, :, :3, :])\n # normal_vec: [num_polygon, max_num_surfaces, 3]\n # d: [num_polygon, max_num_surfaces]\n return _points_in_convex_polygon_3d_jit(points, polygon_surfaces,\n normal_vec, d, num_surfaces)" }, { "identifier": "Sample", "path": "paddle3d/sample.py", "snippet": "class Sample(_EasyDict):\n \"\"\"\n \"\"\"\n _VALID_MODALITIES = [\"image\", \"lidar\", \"radar\", \"multimodal\", \"multiview\"]\n\n def __init__(self, path: str, modality: str):\n if modality not in self._VALID_MODALITIES:\n raise ValueError('Only modality {} is supported, but got {}'.format(\n self._VALID_MODALITIES, modality))\n\n self.meta = SampleMeta()\n\n self.path = path\n self.data = None\n self.modality = modality.lower()\n\n self.bboxes_2d = None\n self.bboxes_3d = None\n self.labels = None\n\n self.sweeps = []\n self.attrs = None" }, { "identifier": "functional", "path": "paddle3d/transforms/functional.py", "snippet": "def horizontal_flip(im: np.ndarray) -> np.ndarray:\ndef vertical_flip(im: np.ndarray) -> np.ndarray:\ndef normalize(im: np.ndarray, mean: Tuple[float, float, float],\n std: Tuple[float, float, float]) -> np.ndarray:\ndef normalize_use_cv2(im: np.ndarray,\n mean: np.ndarray,\n std: np.ndarray,\n to_rgb=True):\ndef get_frustum(im_bbox, C, near_clip=0.001, far_clip=100):\ndef corner_to_surface_normal(corners):\ndef points_to_voxel(points, voxel_size, point_cloud_range, grid_size, voxels,\n coords, num_points_per_voxel, grid_idx_to_voxel_idx,\n max_points_in_voxel, max_voxel_num):\ndef create_anchors_3d_stride(feature_size,\n sizes=[1.6, 3.9, 1.56],\n anchor_strides=[0.4, 0.4, 0.0],\n anchor_offsets=[0.2, -39.8, -1.78],\n rotations=[0, np.pi / 2]):\ndef sparse_sum_for_anchors_mask(coors, shape):\ndef fused_get_anchors_area(dense_map, anchors_bv, stride, offset, grid_size):\ndef noise_per_box(bev_boxes, corners_2d, ignored_corners_2d, rotation_noises,\n translation_noises):\ndef perturb_object_points_(points, centers, point_masks, rotation_noises,\n translation_noises):\ndef perturb_object_bboxes_3d_(bboxes_3d, rotation_noises, translation_noises):\ndef nearest_iou_similarity(bboxes_3d_1, bboxes_3d_2):\ndef random_depth_image_horizontal(data_dict=None):\ndef blend_transform(img: np.ndarray, src_image: np.ndarray, src_weight: float,\n dst_weight: float):\ndef sample_point(sample, num_points):\n N = anchors_bv.shape[0]\n ID = dense_map[anchor_coor[3], anchor_coor[2]]\n IA = dense_map[anchor_coor[1], anchor_coor[0]]\n IB = dense_map[anchor_coor[3], anchor_coor[0]]\n IC = dense_map[anchor_coor[1], anchor_coor[2]]\n W = image.shape[1]" }, { "identifier": "TransformABC", "path": "paddle3d/transforms/base.py", "snippet": "class TransformABC(abc.ABC):\n @abc.abstractmethod\n def __call__(self, sample: Sample):\n \"\"\"\n \"\"\"" }, { "identifier": "logger", "path": "paddle3d/utils/logger.py", "snippet": "class Logger(object):\nclass ProgressBar(object):\n def __init__(self, name: str = None):\n def format(self):\n def disable(self):\n def enable(self):\n def enabled(self) -> bool:\n def __call__(self, log_level: str, msg: str):\n def use_terminator(self, terminator: str):\n def processing(self, msg: str, flush_interval: float = 0.1):\n def _printer():\n def progressbar(self, msg: str, flush_interval: float = 
0.1):\n def range(self, stop: int, msg: str):\n def enumerate(self, iterable: Iterable, msg: str):\n def __init__(self, logger: Logger, flush_interval: float = 0.1):\n def update(self, progress: float):" } ]
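The SemanticKITTIDataset snippet in the context above folds the sparse raw SemanticKITTI label ids (LEARNING_MAP, ids up to 259) into contiguous training ids via a dense numpy lookup table (build_remap_lut). A small sketch of the same LUT idea, using a toy mapping of my own rather than the full table:

import numpy as np

# Toy subset of a SemanticKITTI-style mapping: raw label id -> training id
learning_map = {0: 0, 10: 1, 11: 2, 252: 1}  # e.g. "moving-car" folds into "car"

def build_remap_lut(mapping: dict) -> np.ndarray:
    # Dense table sized past the largest key (extra slack for unknown ids),
    # so remapping a whole label array is a single fancy-indexing operation.
    lut = np.zeros(max(mapping) + 100, dtype=np.int32)
    lut[list(mapping.keys())] = list(mapping.values())
    return lut

lut = build_remap_lut(learning_map)
raw_labels = np.array([10, 252, 0, 11])
print(lut[raw_labels])  # -> [1 1 0 2]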
import os import cv2 import numpy as np from pathlib import Path from typing import List, Union from PIL import Image from paddle3d.apis import manager from paddle3d.datasets.kitti import kitti_utils from paddle3d.datasets.semantic_kitti.semantic_kitti import \ SemanticKITTIDataset from paddle3d.geometries import PointCloud from paddle3d.geometries.bbox import points_in_convex_polygon_3d_jit from paddle3d.sample import Sample from paddle3d.transforms import functional as F from paddle3d.transforms.base import TransformABC from paddle3d.utils.logger import logger
9127
@manager.TRANSFORMS.add_component class LoadSemanticKITTIRange(TransformABC): """ Load SemanticKITTI range image. Please refer to <https://github.com/PRBonn/semantic-kitti-api/blob/master/auxiliary/laserscan.py>. Args: project_label (bool, optional): Whether project label to range view or not. """ def __init__(self, project_label=True): self.project_label = project_label self.proj_H = 64 self.proj_W = 1024 self.upper_inclination = 3. / 180. * np.pi self.lower_inclination = -25. / 180. * np.pi self.fov = self.upper_inclination - self.lower_inclination self.remap_lut = SemanticKITTIDataset.build_remap_lut() def _remap_semantic_labels(self, sem_label): """ Remap semantic labels to cross entropy format. Please refer to <https://github.com/PRBonn/semantic-kitti-api/blob/master/remap_semantic_labels.py>. """ return self.remap_lut[sem_label] def __call__(self, sample: Sample) -> Sample: raw_scan = np.fromfile(sample.path, dtype=np.float32).reshape((-1, 4)) points = raw_scan[:, 0:3] remissions = raw_scan[:, 3] # get depth of all points (L-2 norm of [x, y, z]) depth = np.linalg.norm(points, ord=2, axis=1) # get angles of all points scan_x = points[:, 0] scan_y = points[:, 1] scan_z = points[:, 2] yaw = -np.arctan2(scan_y, scan_x) pitch = np.arcsin(scan_z / depth) # get projections in image coords proj_x = 0.5 * (yaw / np.pi + 1.0) # in [0.0, 1.0] proj_y = 1.0 - ( pitch + abs(self.lower_inclination)) / self.fov # in [0.0, 1.0] # scale to image size using angular resolution proj_x *= self.proj_W # in [0.0, W] proj_y *= self.proj_H # in [0.0, H] # round and clamp for use as index proj_x = np.floor(proj_x) proj_x = np.minimum(self.proj_W - 1, proj_x) proj_x = np.maximum(0, proj_x).astype(np.int32) # in [0,W-1] proj_x_copy = np.copy( proj_x ) # save a copy in original order, for each point, where it is in the range image proj_y = np.floor(proj_y) proj_y = np.minimum(self.proj_H - 1, proj_y) proj_y = np.maximum(0, proj_y).astype(np.int32) # in [0,H-1] proj_y_copy = np.copy( proj_y ) # save a copy in original order, for each point, where it is in the range image # unproj_range_copy = np.copy(depth) # copy of depth in original order # order in decreasing depth indices = np.arange(depth.shape[0]) order = np.argsort(depth)[::-1] depth = depth[order] indices = indices[order] points = points[order] remission = remissions[order] proj_y = proj_y[order] proj_x = proj_x[order] # projected range image - [H,W] range (-1 is no data) proj_range = np.full((self.proj_H, self.proj_W), -1, dtype=np.float32) # projected point cloud xyz - [H,W,3] xyz coord (-1 is no data) proj_xyz = np.full((self.proj_H, self.proj_W, 3), -1, dtype=np.float32) # projected remission - [H,W] intensity (-1 is no data) proj_remission = np.full((self.proj_H, self.proj_W), -1, dtype=np.float32) # projected index (for each pixel, what I am in the pointcloud) # [H,W] index (-1 is no data) proj_idx = np.full((self.proj_H, self.proj_W), -1, dtype=np.int32) proj_range[proj_y, proj_x] = depth proj_xyz[proj_y, proj_x] = points proj_remission[proj_y, proj_x] = remission proj_idx[proj_y, proj_x] = indices proj_mask = proj_idx > 0 # mask containing for each pixel, if it contains a point or not sample.data = np.concatenate([ proj_range[None, ...], proj_xyz.transpose([2, 0, 1]), proj_remission[None, ...] 
]) sample.meta["proj_mask"] = proj_mask.astype(np.float32) sample.meta["proj_x"] = proj_x_copy sample.meta["proj_y"] = proj_y_copy if sample.labels is not None: # load labels raw_label = np.fromfile( sample.labels, dtype=np.uint32).reshape((-1)) # only fill in attribute if the right size if raw_label.shape[0] == points.shape[0]: sem_label = raw_label & 0xFFFF # semantic label in lower half sem_label = self._remap_semantic_labels(sem_label) # inst_label = raw_label >> 16 # instance id in upper half else:
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __all__ = [ "LoadImage", "LoadPointCloud", "RemoveCameraInvisiblePointsKITTI", "RemoveCameraInvisiblePointsKITTIV2", "LoadSemanticKITTIRange" ] @manager.TRANSFORMS.add_component class LoadImage(TransformABC): """ """ _READER_MAPPER = {"cv2": cv2.imread, "pillow": Image.open} def __init__(self, to_chw: bool = True, to_rgb: bool = True, reader: str = "cv2"): if reader not in self._READER_MAPPER.keys(): raise ValueError('Unsupported reader {}'.format(reader)) self.reader = reader self.to_rgb = to_rgb self.to_chw = to_chw def __call__(self, sample: Sample) -> Sample: """ """ sample.data = np.array(self._READER_MAPPER[self.reader](sample.path)) sample.meta.image_reader = self.reader sample.meta.image_format = "bgr" if self.reader == "cv2" else "rgb" sample.meta.channel_order = "hwc" if sample.meta.image_format != "rgb" and self.to_rgb: if sample.meta.image_format == "bgr": sample.data = cv2.cvtColor(sample.data, cv2.COLOR_BGR2RGB) sample.meta.image_format = "rgb" else: raise RuntimeError('Unsupported image format {}'.format( sample.meta.image_format)) elif sample.meta.image_format != "bgr" and (self.to_rgb is False): if sample.meta.image_format == "rgb": sample.data = sample.data[:, :, ::-1] sample.meta.image_format = "bgr" else: raise RuntimeError('Unsupported image format {}'.format( sample.meta.image_format)) if self.to_chw: sample.data = sample.data.transpose((2, 0, 1)) sample.meta.channel_order = "chw" return sample @manager.TRANSFORMS.add_component class LoadPointCloud(TransformABC): """ Load point cloud. Args: dim: The dimension of each point. use_dim: The dimension of each point to use. use_time_lag: Whether to use time lag. sweep_remove_radius: The radius within which points are removed in sweeps. 
""" def __init__(self, dim, use_dim: Union[int, List[int]] = None, use_time_lag: bool = False, sweep_remove_radius: float = 1, sep: str = ''): self.dim = dim self.use_dim = range(use_dim) if isinstance(use_dim, int) else use_dim self.use_time_lag = use_time_lag self.sweep_remove_radius = sweep_remove_radius self.sep = sep def __call__(self, sample: Sample): """ """ if sample.modality != "lidar": raise ValueError('{} Only Support samples in modality lidar'.format( self.__class__.__name__)) if sample.data is not None: raise ValueError( 'The data for this sample has been processed before.') data = np.fromfile(sample.path, np.float32, sep=self.sep).reshape(-1, self.dim) if self.use_dim is not None: data = data[:, self.use_dim] if self.use_time_lag: time_lag = np.zeros((data.shape[0], 1), dtype=data.dtype) data = np.hstack([data, time_lag]) if len(sample.sweeps) > 0: data_sweep_list = [ data, ] for i in np.random.choice( len(sample.sweeps), len(sample.sweeps), replace=False): sweep = sample.sweeps[i] sweep_data = np.fromfile(sweep.path, np.float32).reshape( -1, self.dim) if self.use_dim: sweep_data = sweep_data[:, self.use_dim] sweep_data = sweep_data.T # Remove points that are in a certain radius from origin. x_filter_mask = np.abs( sweep_data[0, :]) < self.sweep_remove_radius y_filter_mask = np.abs( sweep_data[1, :]) < self.sweep_remove_radius not_close = np.logical_not( np.logical_and(x_filter_mask, y_filter_mask)) sweep_data = sweep_data[:, not_close] # Homogeneous transform of current sample to reference coordinate if sweep.meta.ref_from_curr is not None: sweep_data[:3, :] = sweep.meta.ref_from_curr.dot( np.vstack((sweep_data[:3, :], np.ones(sweep_data.shape[1]))))[:3, :] sweep_data = sweep_data.T if self.use_time_lag: curr_time_lag = sweep.meta.time_lag * np.ones( (sweep_data.shape[0], 1)).astype(sweep_data.dtype) sweep_data = np.hstack([sweep_data, curr_time_lag]) data_sweep_list.append(sweep_data) data = np.concatenate(data_sweep_list, axis=0) sample.data = PointCloud(data) return sample @manager.TRANSFORMS.add_component class RemoveCameraInvisiblePointsKITTI(TransformABC): """ Remove camera invisible points for KITTI dataset. """ def __call__(self, sample: Sample): calibs = sample.calibs C, Rinv, T = kitti_utils.projection_matrix_decomposition(calibs[2]) im_path = (Path(sample.path).parents[1] / "image_2" / Path( sample.path).stem).with_suffix(".png") if os.path.exists(im_path): im_shape = cv2.imread(str(im_path)).shape[:2] else: im_shape = (375, 1242) im_shape = np.array(im_shape, dtype=np.int32) im_bbox = [0, 0, im_shape[1], im_shape[0]] frustum = F.get_frustum(im_bbox, C) frustum = (Rinv @ (frustum - T).T).T frustum = kitti_utils.coord_camera_to_velodyne(frustum, calibs) frustum_normals = F.corner_to_surface_normal(frustum[None, ...]) indices = points_in_convex_polygon_3d_jit(sample.data[:, :3], frustum_normals) sample.data = sample.data[indices.reshape([-1])] return sample @manager.TRANSFORMS.add_component class RemoveCameraInvisiblePointsKITTIV2(TransformABC): """ Remove camera invisible points for KITTI dataset, unlike `RemoveCameraInvisiblePointsKITTI` which projects image plane to a frustum, this version projects poinst into image plane and remove the points outside the image boundary. 
""" def __init__(self): self.V2C = None self.R0 = None def __call__(self, sample: Sample): calibs = sample.calibs self.R0 = calibs[4] self.V2C = calibs[5] self.P2 = calibs[2] im_path = (Path(sample.path).parents[1] / "image_2" / Path( sample.path).stem).with_suffix(".png") if os.path.exists(im_path): im_shape = cv2.imread(str(im_path)).shape[:2] else: im_shape = (375, 1242) im_shape = np.array(im_shape, dtype=np.int32) pts = sample.data[:, 0:3] # lidar to rect pts_lidar_hom = self.cart_to_hom(pts) pts_rect = np.dot(pts_lidar_hom, np.dot(self.V2C.T, self.R0.T)) # rect to img pts_img, pts_rect_depth = self.rect_to_img(pts_rect) val_flag_1 = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < im_shape[1]) val_flag_2 = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < im_shape[0]) val_flag_merge = np.logical_and(val_flag_1, val_flag_2) pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0) sample.data = sample.data[pts_valid_flag] return sample def cart_to_hom(self, pts): pts_hom = np.hstack((pts, np.ones((pts.shape[0], 1), dtype=np.float32))) return pts_hom def rect_to_img(self, pts_rect): pts_rect_hom = self.cart_to_hom(pts_rect) pts_2d_hom = np.dot(pts_rect_hom, self.P2.T) pts_img = (pts_2d_hom[:, 0:2].T / pts_rect_hom[:, 2]).T # (N, 2) pts_rect_depth = pts_2d_hom[:, 2] - self.P2.T[ 3, 2] # depth in rect camera coord return pts_img, pts_rect_depth @manager.TRANSFORMS.add_component class LoadSemanticKITTIRange(TransformABC): """ Load SemanticKITTI range image. Please refer to <https://github.com/PRBonn/semantic-kitti-api/blob/master/auxiliary/laserscan.py>. Args: project_label (bool, optional): Whether project label to range view or not. """ def __init__(self, project_label=True): self.project_label = project_label self.proj_H = 64 self.proj_W = 1024 self.upper_inclination = 3. / 180. * np.pi self.lower_inclination = -25. / 180. * np.pi self.fov = self.upper_inclination - self.lower_inclination self.remap_lut = SemanticKITTIDataset.build_remap_lut() def _remap_semantic_labels(self, sem_label): """ Remap semantic labels to cross entropy format. Please refer to <https://github.com/PRBonn/semantic-kitti-api/blob/master/remap_semantic_labels.py>. 
""" return self.remap_lut[sem_label] def __call__(self, sample: Sample) -> Sample: raw_scan = np.fromfile(sample.path, dtype=np.float32).reshape((-1, 4)) points = raw_scan[:, 0:3] remissions = raw_scan[:, 3] # get depth of all points (L-2 norm of [x, y, z]) depth = np.linalg.norm(points, ord=2, axis=1) # get angles of all points scan_x = points[:, 0] scan_y = points[:, 1] scan_z = points[:, 2] yaw = -np.arctan2(scan_y, scan_x) pitch = np.arcsin(scan_z / depth) # get projections in image coords proj_x = 0.5 * (yaw / np.pi + 1.0) # in [0.0, 1.0] proj_y = 1.0 - ( pitch + abs(self.lower_inclination)) / self.fov # in [0.0, 1.0] # scale to image size using angular resolution proj_x *= self.proj_W # in [0.0, W] proj_y *= self.proj_H # in [0.0, H] # round and clamp for use as index proj_x = np.floor(proj_x) proj_x = np.minimum(self.proj_W - 1, proj_x) proj_x = np.maximum(0, proj_x).astype(np.int32) # in [0,W-1] proj_x_copy = np.copy( proj_x ) # save a copy in original order, for each point, where it is in the range image proj_y = np.floor(proj_y) proj_y = np.minimum(self.proj_H - 1, proj_y) proj_y = np.maximum(0, proj_y).astype(np.int32) # in [0,H-1] proj_y_copy = np.copy( proj_y ) # save a copy in original order, for each point, where it is in the range image # unproj_range_copy = np.copy(depth) # copy of depth in original order # order in decreasing depth indices = np.arange(depth.shape[0]) order = np.argsort(depth)[::-1] depth = depth[order] indices = indices[order] points = points[order] remission = remissions[order] proj_y = proj_y[order] proj_x = proj_x[order] # projected range image - [H,W] range (-1 is no data) proj_range = np.full((self.proj_H, self.proj_W), -1, dtype=np.float32) # projected point cloud xyz - [H,W,3] xyz coord (-1 is no data) proj_xyz = np.full((self.proj_H, self.proj_W, 3), -1, dtype=np.float32) # projected remission - [H,W] intensity (-1 is no data) proj_remission = np.full((self.proj_H, self.proj_W), -1, dtype=np.float32) # projected index (for each pixel, what I am in the pointcloud) # [H,W] index (-1 is no data) proj_idx = np.full((self.proj_H, self.proj_W), -1, dtype=np.int32) proj_range[proj_y, proj_x] = depth proj_xyz[proj_y, proj_x] = points proj_remission[proj_y, proj_x] = remission proj_idx[proj_y, proj_x] = indices proj_mask = proj_idx > 0 # mask containing for each pixel, if it contains a point or not sample.data = np.concatenate([ proj_range[None, ...], proj_xyz.transpose([2, 0, 1]), proj_remission[None, ...] ]) sample.meta["proj_mask"] = proj_mask.astype(np.float32) sample.meta["proj_x"] = proj_x_copy sample.meta["proj_y"] = proj_y_copy if sample.labels is not None: # load labels raw_label = np.fromfile( sample.labels, dtype=np.uint32).reshape((-1)) # only fill in attribute if the right size if raw_label.shape[0] == points.shape[0]: sem_label = raw_label & 0xFFFF # semantic label in lower half sem_label = self._remap_semantic_labels(sem_label) # inst_label = raw_label >> 16 # instance id in upper half else:
logger.error("Point cloud shape: {}".format(points.shape))
8
2023-11-08 07:08:03+00:00
12k
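The LoadSemanticKITTIRange transform in the row above maps each LiDAR point to a cell of a 64 x 1024 range image through its yaw and pitch angles. A standalone sketch of just that projection step (constants mirror the snippet; the function name is mine):

import numpy as np

def spherical_projection(points: np.ndarray, proj_H: int = 64, proj_W: int = 1024,
                         fov_up: float = 3.0 / 180.0 * np.pi,
                         fov_down: float = -25.0 / 180.0 * np.pi):
    # Returns (row, col, depth) per point for a range image of size proj_H x proj_W.
    fov = fov_up - fov_down
    depth = np.linalg.norm(points[:, :3], ord=2, axis=1)

    yaw = -np.arctan2(points[:, 1], points[:, 0])
    pitch = np.arcsin(points[:, 2] / depth)

    proj_x = 0.5 * (yaw / np.pi + 1.0) * proj_W              # azimuth -> column in [0, W]
    proj_y = (1.0 - (pitch + abs(fov_down)) / fov) * proj_H  # elevation -> row in [0, H]

    proj_x = np.clip(np.floor(proj_x), 0, proj_W - 1).astype(np.int32)
    proj_y = np.clip(np.floor(proj_y), 0, proj_H - 1).astype(np.int32)
    return proj_y, proj_x, depth

# pts = np.fromfile(scan_path, dtype=np.float32).reshape(-1, 4)  # x, y, z, remission (scan_path is hypothetical)
# rows, cols, depth = spherical_projection(pts)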
camlsys/fl-project-template
project/main.py
[ { "identifier": "get_client_generator", "path": "project/client/client.py", "snippet": "def get_client_generator(\n working_dir: Path,\n net_generator: NetGen,\n dataloader_gen: ClientDataloaderGen,\n train: TrainFunc,\n test: TestFunc,\n) -> ClientGen:\n \"\"\"Return a function which creates a new Client.\n\n Client has access to the working dir,\n can generate a network and can generate a dataloader.\n The client receives train and test functions with pre-defined APIs.\n\n Parameters\n ----------\n working_dir : Path\n The path to the working directory.\n net_generator : NetGen\n The network generator.\n Please respect the pydantic schema.\n dataloader_gen : ClientDataloaderGen\n The dataloader generator.\n Uses the client id to determine partition.\n Please respect the pydantic schema.\n train : TrainFunc\n The train function.\n Please respect the interface and pydantic schema.\n test : TestFunc\n The test function.\n Please respect the interface and pydantic schema.\n\n Returns\n -------\n ClientGen\n The function which creates a new Client.\n \"\"\"\n\n def client_generator(cid: int | str) -> Client:\n \"\"\"Return a new Client.\n\n Parameters\n ----------\n cid : int | str\n The client's ID.\n\n Returns\n -------\n Client\n The new Client.\n \"\"\"\n return Client(\n cid,\n working_dir,\n net_generator,\n dataloader_gen,\n train,\n test,\n )\n\n return client_generator" }, { "identifier": "dispatch_config", "path": "project/dispatch/dispatch.py", "snippet": "def dispatch_config(cfg: DictConfig) -> ConfigStructure:\n \"\"\"Dispatch the fit/eval config functions based on on the hydra config.\n\n Functionality should be added to the dispatch.py\n file in the task folder.\n Statically specify the new dispatch function in the list,\n function order determines precedence\n if two different tasks may match the config.\n\n Parameters\n ----------\n cfg : DictConfig\n The configuration for the config function.\n Loaded dynamically from the config file.\n\n Returns\n -------\n ConfigStructure\n The config functions.\n \"\"\"\n # Create the list of task dispatches to try\n task_config_functions: list[Callable[[DictConfig], ConfigStructure | None]] = [\n dispatch_mnist_config,\n dispatch_default_config,\n ]\n\n # Match the first function which does not return None\n for task in task_config_functions:\n result = task(cfg)\n if result is not None:\n return result\n\n raise ValueError(\n f\"Unable to match the config generation functions: {cfg}\",\n )" }, { "identifier": "dispatch_data", "path": "project/dispatch/dispatch.py", "snippet": "def dispatch_data(cfg: DictConfig) -> DataStructure:\n \"\"\"Dispatch the net generator and dataloader client/fed generator functions.\n\n Functionality should be added to the dispatch.py file in the task folder.\n Statically specify the new dispatch function in the list,\n function order determines precedence if two different tasks may match the config.\n\n Parameters\n ----------\n cfg : DictConfig\n The configuration for the data function.\n Loaded dynamically from the config file.\n\n Returns\n -------\n DataStructure\n The net generator and dataloader generator functions.\n \"\"\"\n # Create the list of task dispatches to try\n task_data_dependent_functions: list[\n Callable[[DictConfig], DataStructure | None]\n ] = [\n dispatch_mnist_data,\n dispatch_default_data,\n ]\n\n # Match the first function which does not return None\n for task in task_data_dependent_functions:\n result = task(cfg)\n if result is not None:\n return result\n\n raise ValueError(\n 
f\"Unable to match the net generator and dataloader generator functions: {cfg}\",\n )" }, { "identifier": "dispatch_train", "path": "project/dispatch/dispatch.py", "snippet": "def dispatch_train(cfg: DictConfig) -> TrainStructure:\n \"\"\"Dispatch the train/test and fed test functions based on the config file.\n\n Functionality should be added to the dispatch.py file in the task folder.\n Statically specify the new dispatch function in the list,\n function order determines precedence if two different tasks may match the config.\n\n Parameters\n ----------\n cfg : DictConfig\n The configuration for the train function.\n Loaded dynamically from the config file.\n\n Returns\n -------\n TrainStructure\n The train function, test function and the get_fed_eval_fn function.\n \"\"\"\n # Create the list of task dispatches to try\n task_train_functions: list[Callable[[DictConfig], TrainStructure | None]] = [\n dispatch_default_train,\n dispatch_mnist_train,\n ]\n\n # Match the first function which does not return None\n for task in task_train_functions:\n result = task(cfg)\n if result is not None:\n return result\n\n raise ValueError(\n f\"Unable to match the train/test and fed_test functions: {cfg}\",\n )" }, { "identifier": "DeterministicClientManager", "path": "project/fed/server/deterministic_client_manager.py", "snippet": "class DeterministicClientManager(SimpleClientManager):\n \"\"\"A deterministic client manager.\n\n Samples clients in the same order every time based on the seed. Also allows sampling\n with replacement.\n \"\"\"\n\n def __init__(\n self,\n seed: int,\n enable_resampling: bool = False,\n ) -> None:\n \"\"\"Initialize DeterministicClientManager.\n\n Parameters\n ----------\n seed : int\n The seed to use for deterministic sampling.\n enable_resampling : bool\n Whether to allow sampling with replacement.\n\n Returns\n -------\n None\n \"\"\"\n super().__init__()\n self.seed = seed\n self.rng = random.Random(seed)\n self.enable_resampling = enable_resampling\n\n def sample(\n self,\n num_clients: int,\n min_num_clients: int | None = None,\n criterion: Criterion | None = None,\n ) -> list[ClientProxy]:\n \"\"\"Sample a number of Flower ClientProxy instances.\n\n Guarantees deterministic client sampling and enables\n sampling with replacement.\n\n Parameters\n ----------\n num_clients : int\n The number of clients to sample.\n min_num_clients : Optional[int]\n The minimum number of clients to sample.\n criterion : Optional[Criterion]\n A criterion to select clients.\n\n Returns\n -------\n List[ClientProxy]\n A list of sampled clients.\n \"\"\"\n # Block until at least num_clients are connected.\n if min_num_clients is None:\n min_num_clients = num_clients\n self.wait_for(min_num_clients)\n\n cids = list(self.clients)\n\n if criterion is not None:\n cids = [cid for cid in cids if criterion.select(self.clients[cid])]\n # Shuffle the list of clients\n\n available_cids = []\n if num_clients <= len(cids):\n available_cids = self.rng.sample(\n cids,\n num_clients,\n )\n elif self.enable_resampling:\n available_cids = self.rng.choices(\n cids,\n k=num_clients,\n )\n else:\n log(\n logging.INFO,\n \"Sampling failed: number of available clients\"\n \" (%s) is less than number of requested clients (%s).\",\n len(cids),\n num_clients,\n )\n available_cids = []\n\n client_list = [self.clients[cid] for cid in available_cids]\n log(\n logging.INFO,\n \"Sampled the following clients: %s\",\n available_cids,\n )\n return client_list" }, { "identifier": "WandbHistory", "path": 
"project/fed/server/wandb_history.py", "snippet": "class WandbHistory(History):\n \"\"\"History class for training and/or evaluation metrics collection.\"\"\"\n\n def __init__(self, use_wandb: bool = True) -> None:\n \"\"\"Initialize the history.\n\n Parameters\n ----------\n use_wandb : bool\n Whether to use wandb.\n Turn off to avoid communication overhead.\n\n Returns\n -------\n None\n \"\"\"\n super().__init__()\n\n self.use_wandb = use_wandb\n\n def add_loss_distributed(\n self,\n server_round: int,\n loss: float,\n ) -> None:\n \"\"\"Add one loss entry (from distributed evaluation) to history/wandb.\n\n Parameters\n ----------\n server_round : int\n The current server round.\n loss : float\n The loss to add.\n\n Returns\n -------\n None\n \"\"\"\n super().add_loss_distributed(server_round, loss)\n if self.use_wandb:\n wandb.log(\n {\"distributed_loss\": loss},\n step=server_round,\n )\n\n def add_loss_centralized(\n self,\n server_round: int,\n loss: float,\n ) -> None:\n \"\"\"Add one loss entry (from centralized evaluation) to history/wandb.\n\n Parameters\n ----------\n server_round : int\n The current server round.\n loss : float\n The loss to add.\n\n Returns\n -------\n None\n \"\"\"\n super().add_loss_centralized(server_round, loss)\n if self.use_wandb:\n wandb.log(\n {\"centralised_loss\": loss},\n step=server_round,\n )\n\n def add_metrics_distributed_fit(\n self,\n server_round: int,\n metrics: dict[str, Scalar],\n ) -> None:\n \"\"\"Add metrics entries (from distributed fit) to history/wandb.\n\n Parameters\n ----------\n server_round : int\n The current server round.\n metrics : Dict[str, Scalar]\n The metrics to add.\n\n Returns\n -------\n None\n \"\"\"\n super().add_metrics_distributed_fit(\n server_round,\n metrics,\n )\n if self.use_wandb:\n for key in metrics:\n wandb.log(\n {key: metrics[key]},\n step=server_round,\n )\n\n def add_metrics_distributed(\n self,\n server_round: int,\n metrics: dict[str, Scalar],\n ) -> None:\n \"\"\"Add metrics entries (from distributed evaluation) to history/wandb.\n\n Parameters\n ----------\n server_round : int\n The current server round.\n metrics : Dict[str, Scalar]\n The metrics to add.\n\n Returns\n -------\n None\n \"\"\"\n super().add_metrics_distributed(\n server_round,\n metrics,\n )\n if self.use_wandb:\n for key in metrics:\n wandb.log(\n {key: metrics[key]},\n step=server_round,\n )\n\n def add_metrics_centralized(\n self,\n server_round: int,\n metrics: dict[str, Scalar],\n ) -> None:\n \"\"\"Add metrics entries (from centralized evaluation) to history/wand.\n\n Parameters\n ----------\n server_round : int\n The current server round.\n metrics : Dict[str, Scalar]\n The metrics to add.\n\n Returns\n -------\n None\n \"\"\"\n super().add_metrics_centralized(\n server_round,\n metrics,\n )\n if self.use_wandb:\n for key in metrics:\n wandb.log(\n {key: metrics[key]},\n step=server_round,\n )" }, { "identifier": "WandbServer", "path": "project/fed/server/wandb_server.py", "snippet": "class WandbServer(Server):\n \"\"\"Flower server.\"\"\"\n\n def __init__(\n self,\n *,\n client_manager: ClientManager,\n strategy: Strategy | None = None,\n history: History | None = None,\n save_parameters_to_file: Callable[\n [Parameters],\n None,\n ],\n save_files_per_round: Callable[[int], None],\n ) -> None:\n \"\"\"Flower server implementation.\n\n Parameters\n ----------\n client_manager : ClientManager\n Client manager implementation.\n strategy : Optional[Strategy]\n Strategy implementation.\n history : Optional[History]\n History 
implementation.\n save_parameters_to_file : Callable[[Parameters], None]\n Function to save the parameters to file.\n save_files_per_round : Callable[[int], None]\n Function to save files every round.\n\n Returns\n -------\n None\n \"\"\"\n super().__init__(\n client_manager=client_manager,\n strategy=strategy,\n )\n\n self.history: History | None = history\n self.save_parameters_to_file = save_parameters_to_file\n self.save_files_per_round = save_files_per_round\n\n # pylint: disable=too-many-locals\n def fit(\n self,\n num_rounds: int,\n timeout: float | None,\n ) -> History:\n \"\"\"Run federated averaging for a number of rounds.\n\n Parameters\n ----------\n num_rounds : int\n The number of rounds to run.\n timeout : Optional[float]\n Timeout in seconds.\n\n Returns\n -------\n History\n The history of the training.\n Potentially using a pre-defined history.\n \"\"\"\n history = self.history if self.history is not None else History()\n\n # Initialize parameters\n log(INFO, \"Initializing global parameters\")\n self.parameters = self._get_initial_parameters(\n timeout=timeout,\n )\n log(INFO, \"Evaluating initial parameters\")\n res = self.strategy.evaluate(\n 0,\n parameters=self.parameters,\n )\n if res is not None:\n log(\n INFO,\n \"initial parameters (loss, other metrics): %s, %s\",\n res[0],\n res[1],\n )\n history.add_loss_centralized(\n server_round=0,\n loss=res[0],\n )\n history.add_metrics_centralized(\n server_round=0,\n metrics=res[1],\n )\n\n # Run federated learning for num_rounds\n log(INFO, \"FL starting\")\n start_time = timeit.default_timer()\n\n # Save initial parameters and files\n self.save_parameters_to_file(self.parameters)\n self.save_files_per_round(0)\n\n for current_round in range(1, num_rounds + 1):\n # Train model and replace previous global model\n res_fit = self.fit_round(\n server_round=current_round,\n timeout=timeout,\n )\n if res_fit is not None:\n (\n parameters_prime,\n fit_metrics,\n _,\n ) = res_fit # fit_metrics_aggregated\n if parameters_prime:\n self.parameters = parameters_prime\n history.add_metrics_distributed_fit(\n server_round=current_round,\n metrics=fit_metrics,\n )\n\n # Evaluate model using strategy implementation\n res_cen = self.strategy.evaluate(\n current_round,\n parameters=self.parameters,\n )\n if res_cen is not None:\n loss_cen, metrics_cen = res_cen\n log(\n INFO,\n \"fit progress: (%s, %s, %s, %s)\",\n current_round,\n loss_cen,\n metrics_cen,\n timeit.default_timer() - start_time,\n )\n history.add_loss_centralized(\n server_round=current_round,\n loss=loss_cen,\n )\n history.add_metrics_centralized(\n server_round=current_round,\n metrics=metrics_cen,\n )\n\n # Evaluate model on a sample of available clients\n res_fed = self.evaluate_round(\n server_round=current_round,\n timeout=timeout,\n )\n if res_fed is not None:\n loss_fed, evaluate_metrics_fed, _ = res_fed\n if loss_fed is not None:\n history.add_loss_distributed(\n server_round=current_round,\n loss=loss_fed,\n )\n history.add_metrics_distributed(\n server_round=current_round,\n metrics=evaluate_metrics_fed,\n )\n # Saver round parameters and files\n self.save_parameters_to_file(self.parameters)\n self.save_files_per_round(current_round)\n\n # Bookkeeping\n end_time = timeit.default_timer()\n elapsed = end_time - start_time\n log(INFO, \"FL finished in %s\", elapsed)\n return history" }, { "identifier": "get_initial_parameters", "path": "project/fed/utils/utils.py", "snippet": "def get_initial_parameters(\n net_generator: NetGen,\n config: dict,\n load_from: Path | 
None,\n server_round: int | None,\n) -> Parameters:\n \"\"\"Get the initial parameters for the network.\n\n Parameters\n ----------\n net_generator : NetGen\n The function to generate the network.\n config : Dict\n The configuration.\n load_from : Optional[Path]\n The path to the parameters file.\n\n Returns\n -------\n 'Parameters\n The parameters.\n \"\"\"\n if load_from is None:\n log(\n logging.INFO,\n \"Generating initial parameters with config: %s\",\n config,\n )\n return ndarrays_to_parameters(\n generic_get_parameters(net_generator(config)),\n )\n try:\n if server_round is not None:\n # Load specific round parameters\n load_from = load_from / f\"parameters_{server_round}.bin\"\n else:\n # Load only the most recent parameters\n load_from = max(\n Path(load_from).glob(\"parameters_*.bin\"),\n key=lambda f: (\n int(f.stem.split(\"_\")[1]),\n int(f.stem.split(\"_\")[2]),\n ),\n )\n\n log(\n logging.INFO,\n \"Loading initial parameters from: %s\",\n load_from,\n )\n\n return load_parameters_from_file(load_from)\n except (\n ValueError,\n FileNotFoundError,\n PermissionError,\n OSError,\n EOFError,\n IsADirectoryError,\n ):\n log(\n logging.INFO,\n f\"Loading parameters failed from: {load_from}\",\n )\n log(\n logging.INFO,\n \"Generating initial parameters with config: %s\",\n config,\n )\n\n return ndarrays_to_parameters(\n generic_get_parameters(net_generator(config)),\n )" }, { "identifier": "get_save_parameters_to_file", "path": "project/fed/utils/utils.py", "snippet": "def get_save_parameters_to_file(\n working_dir: Path,\n) -> Callable[[Parameters], None]:\n \"\"\"Get a function to save parameters to a file.\n\n Parameters\n ----------\n working_dir : Path\n The working directory.\n\n Returns\n -------\n Callable[[Parameters], None]\n A function to save parameters to a file.\n \"\"\"\n\n def save_parameters_to_file(\n parameters: Parameters,\n ) -> None:\n \"\"\"Save the parameters to a file.\n\n Parameters\n ----------\n parameters : Parameters\n The parameters to save.\n\n Returns\n -------\n None\n \"\"\"\n parameters_path = working_dir / \"parameters\"\n parameters_path.mkdir(parents=True, exist_ok=True)\n with open(\n parameters_path / \"parameters.bin\",\n \"wb\",\n ) as f:\n # Since Parameters is a list of bytes\n # save the length of each row and the data\n # for deserialization\n for data in parameters.tensors:\n # Prepend the length of the data as a 4-byte integer\n f.write(struct.pack(\"I\", len(data)))\n f.write(data)\n\n return save_parameters_to_file" }, { "identifier": "get_weighted_avg_metrics_agg_fn", "path": "project/fed/utils/utils.py", "snippet": "def get_weighted_avg_metrics_agg_fn(\n to_agg: set[str],\n) -> Callable[[list[tuple[int, dict]]], dict]:\n \"\"\"Return a function to compute a weighted average over pre-defined metrics.\n\n Parameters\n ----------\n to_agg : Set[str]\n The metrics to aggregate.\n\n Returns\n -------\n Callable[[List[Tuple[int, Dict]]], Dict]\n A function to compute a weighted average over pre-defined metrics.\n \"\"\"\n\n def weighted_avg(\n metrics: list[tuple[int, dict]],\n ) -> dict:\n \"\"\"Compute a weighted average over pre-defined metrics.\n\n Parameters\n ----------\n metrics : List[Tuple[int, Dict]]\n The metrics to aggregate.\n\n Returns\n -------\n Dict\n The weighted average over pre-defined metrics.\n \"\"\"\n total_num_examples = sum(\n [num_examples for num_examples, _ in metrics],\n )\n weighted_metrics: dict = defaultdict(float)\n for num_examples, metric in metrics:\n for key, value in metric.items():\n if key in 
to_agg:\n weighted_metrics[key] += num_examples * value\n\n return {\n key: value / total_num_examples for key, value in weighted_metrics.items()\n }\n\n return weighted_avg" }, { "identifier": "test_client", "path": "project/fed/utils/utils.py", "snippet": "def test_client( # noqa: PLR0917\n test_all_clients: bool,\n test_one_client: bool,\n client_generator: ClientGen,\n initial_parameters: Parameters,\n total_clients: int,\n on_fit_config_fn: OnFitConfigFN | None,\n on_evaluate_config_fn: OnEvaluateConfigFN | None,\n) -> None:\n \"\"\"Debug the client code.\n\n Avoids the complexity of Ray.\n \"\"\"\n parameters = parameters_to_ndarrays(initial_parameters)\n if test_all_clients or test_one_client:\n if test_one_client:\n client = client_generator(str(0))\n _, *res_fit = client.fit(\n parameters,\n on_fit_config_fn(0) if on_fit_config_fn else {},\n )\n res_eval = client.evaluate(\n parameters,\n on_evaluate_config_fn(0) if on_evaluate_config_fn else {},\n )\n log(\n logging.INFO,\n \"Fit debug fit: %s and eval: %s\",\n res_fit,\n res_eval,\n )\n else:\n for i in range(total_clients):\n client = client_generator(str(i))\n _, *res_fit = client.fit(\n parameters,\n on_fit_config_fn(i) if on_fit_config_fn else {},\n )\n res_eval = client.evaluate(\n parameters,\n on_evaluate_config_fn(i) if on_evaluate_config_fn else {},\n )\n log(\n logging.INFO,\n \"Fit debug fit: %s and eval: %s\",\n res_fit,\n res_eval,\n )" }, { "identifier": "ClientGen", "path": "project/types/common.py", "snippet": "" }, { "identifier": "FileSystemManager", "path": "project/utils/utils.py", "snippet": "class FileSystemManager:\n \"\"\"A context manager for saving and cleaning up files.\"\"\"\n\n def __init__( # noqa: PLR0917\n self,\n working_dir: Path,\n output_dir: Path,\n to_clean_once: list[str],\n to_save_once: list[str],\n original_hydra_dir: Path,\n reuse_output_dir: bool,\n file_limit: int | None = None,\n ) -> None:\n \"\"\"Initialize the context manager.\n\n Parameters\n ----------\n working_dir : Path\n The working directory.\n output_dir : Path\n The output directory.\n to_clean_once : List[str]\n The tokens to clean once.\n to_save_once : List[str]\n The tokens to save once.\n original_hydra_dir : Path\n The original hydra directory.\n For copying the hydra directory to the working directory.\n reuse_output_dir : bool\n Whether to reuse the output directory.\n file_limit : Optional[int]\n The maximal number of files to search.\n If None, then there is no limit.\n\n Returns\n -------\n None\n \"\"\"\n self.to_clean_once = to_clean_once\n self.working_dir = working_dir\n self.output_dir = output_dir\n self.to_save_once = to_save_once\n self.original_hydra_dir = original_hydra_dir\n self.reuse_output_dir = reuse_output_dir\n self.checkpoint_index = get_checkpoint_index(\n self.output_dir,\n file_limit,\n )\n\n def get_save_files_every_round(\n self,\n to_save: list[str],\n save_frequency: int,\n ) -> Callable[[int], None]:\n \"\"\"Get a function that saves files every save_frequency rounds.\n\n Parameters\n ----------\n to_save : List[str]\n The tokens to save.\n save_frequency : int\n The frequency to save.\n\n Returns\n -------\n Callable[[int], None]\n The function that saves the files.\n \"\"\"\n\n def save_files_round(cur_round: int) -> None:\n if cur_round % save_frequency == 0:\n save_files(\n self.working_dir,\n self.output_dir,\n to_save=to_save,\n ending=cur_round,\n checkpoint_index=self.checkpoint_index,\n )\n\n return save_files_round\n\n def __enter__(self) -> \"FileSystemManager\":\n 
\"\"\"Initialize the context manager and cleanup.\"\"\"\n log(\n logging.INFO,\n f\"Pre-cleaning {self.to_clean_once}\",\n )\n cleanup(self.working_dir, self.to_clean_once)\n\n return self\n\n def __exit__(\n self,\n _exc_type: type[BaseException] | None,\n _exc_value: BaseException | None,\n _traceback: TracebackType | None,\n ) -> None:\n \"\"\"Cleanup the files.\"\"\"\n log(logging.INFO, f\"Saving {self.to_save_once}\")\n\n # Copy the hydra directory to the working directory\n # so that multiple runs can be ran\n # in the same output directory and configs versioned\n hydra_dir = self.working_dir / \".hydra\"\n\n shutil.copytree(\n str(self.original_hydra_dir / \".hydra\"),\n str(object=hydra_dir),\n dirs_exist_ok=True,\n )\n\n # Move main.log to the working directory\n main_log = self.original_hydra_dir / \"main.log\"\n shutil.copy2(\n str(main_log),\n str(self.working_dir / \"main.log\"),\n )\n save_files(\n self.working_dir,\n self.output_dir,\n to_save=self.to_save_once,\n checkpoint_index=self.checkpoint_index,\n )\n log(\n logging.INFO,\n f\"Post-cleaning {self.to_clean_once}\",\n )\n cleanup(\n self.working_dir,\n to_clean=self.to_clean_once,\n )" }, { "identifier": "RayContextManager", "path": "project/utils/utils.py", "snippet": "class RayContextManager:\n \"\"\"A context manager for cleaning up after ray.\"\"\"\n\n def __enter__(self) -> \"RayContextManager\":\n \"\"\"Initialize the context manager.\"\"\"\n return self\n\n def __exit__(\n self,\n _exc_type: type[BaseException] | None,\n _exc_value: BaseException | None,\n _traceback: TracebackType | None,\n ) -> None:\n \"\"\"Cleanup the files.\n\n Parameters\n ----------\n _exc_type : Any\n The exception type.\n _exc_value : Any\n The exception value.\n _traceback : Any\n The traceback.\n\n Returns\n -------\n None\n \"\"\"\n if ray.is_initialized():\n temp_dir = Path(\n ray.worker._global_node.get_session_dir_path(),\n )\n ray.shutdown()\n directory_size = shutil.disk_usage(\n temp_dir,\n ).used\n shutil.rmtree(temp_dir)\n log(\n logging.INFO,\n f\"Cleaned up ray temp session: {temp_dir} with size: {directory_size}\",\n )" }, { "identifier": "seed_everything", "path": "project/utils/utils.py", "snippet": "def seed_everything(seed: int) -> None:\n \"\"\"Seed everything for reproducibility.\n\n Parameters\n ----------\n seed : int\n The seed.\n\n Returns\n -------\n None\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)" }, { "identifier": "wandb_init", "path": "project/utils/utils.py", "snippet": "def wandb_init(\n wandb_enabled: bool,\n *args: Any,\n **kwargs: Any,\n) -> NoOpContextManager | Any:\n \"\"\"Initialize wandb if enabled.\n\n Parameters\n ----------\n wandb_enabled : bool\n Whether wandb is enabled.\n *args : Any\n The arguments to pass to wandb.init.\n **kwargs : Any\n The keyword arguments to pass to wandb.init.\n\n Returns\n -------\n Optional[Union[NoOpContextManager, Any]]\n The wandb context manager if enabled, otherwise a no-op context manager\n \"\"\"\n if wandb_enabled:\n return wandb.init(*args, **kwargs)\n\n return NoOpContextManager()" } ]
import json import logging import os import subprocess import sys import flwr as fl import hydra import wandb from pathlib import Path from typing import cast from flwr.common.logger import log from hydra.core.hydra_config import HydraConfig from hydra.utils import instantiate from omegaconf import DictConfig, OmegaConf from project.client.client import get_client_generator from project.dispatch.dispatch import dispatch_config, dispatch_data, dispatch_train from project.fed.server.deterministic_client_manager import DeterministicClientManager from project.fed.server.wandb_history import WandbHistory from project.fed.server.wandb_server import WandbServer from project.fed.utils.utils import ( get_initial_parameters, get_save_parameters_to_file, get_weighted_avg_metrics_agg_fn, test_client, ) from project.types.common import ClientGen, FedEvalFN from project.utils.utils import ( FileSystemManager, RayContextManager, seed_everything, wandb_init, )
7,628
"""Create and connect the building blocks for your experiments; start the simulation. It includes processing the dataset, instantiate strategy, specifying how the global model will be evaluated, etc. In the end, this script saves the results. """ # Only import from the project root # Never do a relative import nor one that assumes a given folder structure # Make debugging easier when using Hydra + Ray os.environ["HYDRA_FULL_ERROR"] = "1" os.environ["OC_CAUSE"] = "1" @hydra.main( config_path="conf", config_name="base", version_base=None, ) def main(cfg: DictConfig) -> None: """Run the baseline. Parameters ---------- cfg : DictConfig An omegaconf object that stores the hydra config. """ # Print parsed config log(logging.INFO, OmegaConf.to_yaml(cfg)) wandb_config = OmegaConf.to_container( cfg, resolve=True, throw_on_missing=True, ) # Obtain the output dir from hydra original_hydra_dir = Path( hydra.utils.to_absolute_path( HydraConfig.get().runtime.output_dir, ), ) output_directory = original_hydra_dir # Reuse an output directory for checkpointing if cfg.reuse_output_dir is not None: output_directory = Path(cfg.reuse_output_dir) # The directory to save data to results_dir = output_directory / "results" results_dir.mkdir(parents=True, exist_ok=True) # Where to save files to and from if cfg.working_dir is not None: # Pre-defined directory working_dir = Path(cfg.working_dir) else: # Default directory working_dir = output_directory / "working" working_dir.mkdir(parents=True, exist_ok=True) # Wandb context manager # controlls if wandb is initialised or not # if not it returns a dummy run
"""Create and connect the building blocks for your experiments; start the simulation. It includes processing the dataset, instantiate strategy, specifying how the global model will be evaluated, etc. In the end, this script saves the results. """ # Only import from the project root # Never do a relative import nor one that assumes a given folder structure # Make debugging easier when using Hydra + Ray os.environ["HYDRA_FULL_ERROR"] = "1" os.environ["OC_CAUSE"] = "1" @hydra.main( config_path="conf", config_name="base", version_base=None, ) def main(cfg: DictConfig) -> None: """Run the baseline. Parameters ---------- cfg : DictConfig An omegaconf object that stores the hydra config. """ # Print parsed config log(logging.INFO, OmegaConf.to_yaml(cfg)) wandb_config = OmegaConf.to_container( cfg, resolve=True, throw_on_missing=True, ) # Obtain the output dir from hydra original_hydra_dir = Path( hydra.utils.to_absolute_path( HydraConfig.get().runtime.output_dir, ), ) output_directory = original_hydra_dir # Reuse an output directory for checkpointing if cfg.reuse_output_dir is not None: output_directory = Path(cfg.reuse_output_dir) # The directory to save data to results_dir = output_directory / "results" results_dir.mkdir(parents=True, exist_ok=True) # Where to save files to and from if cfg.working_dir is not None: # Pre-defined directory working_dir = Path(cfg.working_dir) else: # Default directory working_dir = output_directory / "working" working_dir.mkdir(parents=True, exist_ok=True) # Wandb context manager # controlls if wandb is initialised or not # if not it returns a dummy run
with wandb_init(
15
2023-11-08 15:31:44+00:00
12k
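The `get_weighted_avg_metrics_agg_fn` helper quoted in the record above weights each client's metric by its example count before averaging. A small illustration of what the returned aggregator computes; the import mirrors the record's import statement, and the client counts and metric values below are made-up numbers.

from project.fed.utils.utils import get_weighted_avg_metrics_agg_fn

# Hypothetical per-client results: (num_examples, metrics)
client_metrics = [
    (100, {"accuracy": 0.90, "loss": 0.40}),
    (300, {"accuracy": 0.80, "loss": 0.60}),
]

aggregate = get_weighted_avg_metrics_agg_fn({"accuracy"})
# accuracy: (100 * 0.90 + 300 * 0.80) / 400 = 0.825; "loss" is ignored since it is not in to_agg
print(aggregate(client_metrics))  # {'accuracy': 0.825}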
silicx/GoldFromOres
DatasetCondensation/main.py
[ { "identifier": "get_loops", "path": "DatasetCondensation/utils.py", "snippet": "def get_loops(ipc):\r\n # Get the two hyper-parameters of outer-loop and inner-loop.\r\n # The following values are empirically good.\r\n if ipc == 1:\r\n outer_loop, inner_loop = 1, 1\r\n elif ipc == 10:\r\n outer_loop, inner_loop = 10, 50\r\n elif ipc == 20:\r\n outer_loop, inner_loop = 20, 25\r\n elif ipc == 30:\r\n outer_loop, inner_loop = 30, 20\r\n elif ipc == 40:\r\n outer_loop, inner_loop = 40, 15\r\n elif ipc == 50:\r\n outer_loop, inner_loop = 50, 10\r\n else:\r\n outer_loop, inner_loop = 0, 0\r\n exit('loop hyper-parameters are not defined for %d ipc'%ipc)\r\n return outer_loop, inner_loop\r" }, { "identifier": "get_dataset", "path": "DatasetCondensation/utils.py", "snippet": "def get_dataset(dataset, data_path):\r\n if dataset == 'MNIST':\r\n channel = 1\r\n im_size = (28, 28)\r\n num_classes = 10\r\n mean = [0.1307]\r\n std = [0.3081]\r\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])\r\n dst_train = datasets.MNIST(data_path, train=True, download=True, transform=transform) # no augmentation\r\n dst_test = datasets.MNIST(data_path, train=False, download=True, transform=transform)\r\n class_names = [str(c) for c in range(num_classes)]\r\n\r\n elif dataset == 'FashionMNIST':\r\n channel = 1\r\n im_size = (28, 28)\r\n num_classes = 10\r\n mean = [0.2861]\r\n std = [0.3530]\r\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])\r\n dst_train = datasets.FashionMNIST(data_path, train=True, download=True, transform=transform) # no augmentation\r\n dst_test = datasets.FashionMNIST(data_path, train=False, download=True, transform=transform)\r\n class_names = dst_train.classes\r\n\r\n elif dataset == 'SVHN':\r\n channel = 3\r\n im_size = (32, 32)\r\n num_classes = 10\r\n mean = [0.4377, 0.4438, 0.4728]\r\n std = [0.1980, 0.2010, 0.1970]\r\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])\r\n dst_train = datasets.SVHN(data_path, split='train', download=True, transform=transform) # no augmentation\r\n dst_test = datasets.SVHN(data_path, split='test', download=True, transform=transform)\r\n class_names = [str(c) for c in range(num_classes)]\r\n\r\n elif dataset == 'CIFAR10':\r\n channel = 3\r\n im_size = (32, 32)\r\n num_classes = 10\r\n mean = [0.4914, 0.4822, 0.4465]\r\n std = [0.2023, 0.1994, 0.2010]\r\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])\r\n dst_train = datasets.CIFAR10(data_path, train=True, download=True, transform=transform) # no augmentation\r\n dst_test = datasets.CIFAR10(data_path, train=False, download=True, transform=transform)\r\n class_names = dst_train.classes\r\n\r\n elif dataset == 'CIFAR100':\r\n channel = 3\r\n im_size = (32, 32)\r\n num_classes = 100\r\n mean = [0.5071, 0.4866, 0.4409]\r\n std = [0.2673, 0.2564, 0.2762]\r\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])\r\n dst_train = datasets.CIFAR100(data_path, train=True, download=True, transform=transform) # no augmentation\r\n dst_test = datasets.CIFAR100(data_path, train=False, download=True, transform=transform)\r\n class_names = dst_train.classes\r\n\r\n elif dataset == 'TinyImageNet':\r\n channel = 3\r\n im_size = (64, 64)\r\n num_classes = 200\r\n mean = [0.485, 0.456, 0.406]\r\n std = [0.229, 0.224, 0.225]\r\n data = torch.load(os.path.join(data_path, 'tinyimagenet.pt'), 
map_location='cpu')\r\n\r\n class_names = data['classes']\r\n\r\n images_train = data['images_train']\r\n labels_train = data['labels_train']\r\n images_train = images_train.detach().float() / 255.0\r\n labels_train = labels_train.detach()\r\n for c in range(channel):\r\n images_train[:,c] = (images_train[:,c] - mean[c])/std[c]\r\n dst_train = TensorDataset(images_train, labels_train) # no augmentation\r\n\r\n images_val = data['images_val']\r\n labels_val = data['labels_val']\r\n images_val = images_val.detach().float() / 255.0\r\n labels_val = labels_val.detach()\r\n\r\n for c in range(channel):\r\n images_val[:, c] = (images_val[:, c] - mean[c]) / std[c]\r\n\r\n dst_test = TensorDataset(images_val, labels_val) # no augmentation\r\n\r\n else:\r\n exit('unknown dataset: %s'%dataset)\r\n\r\n\r\n testloader = torch.utils.data.DataLoader(dst_test, batch_size=256, shuffle=False, num_workers=0)\r\n return channel, im_size, num_classes, class_names, mean, std, dst_train, dst_test, testloader\r" }, { "identifier": "get_network", "path": "DatasetCondensation/utils.py", "snippet": "def get_network(model, channel, num_classes, im_size=(32, 32)):\r\n torch.random.manual_seed(int(time.time() * 1000) % 100000)\r\n net_width, net_depth, net_act, net_norm, net_pooling = get_default_convnet_setting()\r\n\r\n if model == 'MLP':\r\n net = MLP(channel=channel, num_classes=num_classes)\r\n elif model == 'ConvNet':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'LeNet':\r\n net = LeNet(channel=channel, num_classes=num_classes)\r\n elif model == 'AlexNet':\r\n net = AlexNet(channel=channel, num_classes=num_classes)\r\n elif model == 'AlexNetBN':\r\n net = AlexNetBN(channel=channel, num_classes=num_classes)\r\n elif model == 'VGG11':\r\n net = VGG11( channel=channel, num_classes=num_classes)\r\n elif model == 'VGG11BN':\r\n net = VGG11BN(channel=channel, num_classes=num_classes)\r\n elif model == 'ResNet18':\r\n net = ResNet18(channel=channel, num_classes=num_classes)\r\n elif model == 'ResNet18BN_AP':\r\n net = ResNet18BN_AP(channel=channel, num_classes=num_classes)\r\n elif model == 'ResNet18BN':\r\n net = ResNet18BN(channel=channel, num_classes=num_classes)\r\n\r\n elif model == 'ConvNetD1':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=1, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetD2':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=2, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetD3':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=3, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetD4':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=4, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n\r\n elif model == 'ConvNetW32':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=32, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetW64':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=64, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, 
im_size=im_size)\r\n elif model == 'ConvNetW128':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=128, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetW256':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=256, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n\r\n elif model == 'ConvNetAS':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act='sigmoid', net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetAR':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act='relu', net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetAL':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act='leakyrelu', net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetASwish':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act='swish', net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetASwishBN':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act='swish', net_norm='batchnorm', net_pooling=net_pooling, im_size=im_size)\r\n\r\n elif model == 'ConvNetNN':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm='none', net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetBN':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm='batchnorm', net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetLN':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm='layernorm', net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetIN':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm='instancenorm', net_pooling=net_pooling, im_size=im_size)\r\n elif model == 'ConvNetGN':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm='groupnorm', net_pooling=net_pooling, im_size=im_size)\r\n\r\n elif model == 'ConvNetNP':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling='none', im_size=im_size)\r\n elif model == 'ConvNetMP':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling='maxpooling', im_size=im_size)\r\n elif model == 'ConvNetAP':\r\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling='avgpooling', im_size=im_size)\r\n\r\n else:\r\n net = None\r\n exit('unknown model: %s'%model)\r\n\r\n gpu_num = torch.cuda.device_count()\r\n if gpu_num>0:\r\n device = 'cuda'\r\n if gpu_num>1:\r\n net = nn.DataParallel(net)\r\n else:\r\n device = 'cpu'\r\n net = net.to(device)\r\n\r\n return net\r" }, { "identifier": "get_eval_pool", "path": 
"DatasetCondensation/utils.py", "snippet": "def get_eval_pool(eval_mode, model, model_eval):\r\n if eval_mode == 'M': # multiple architectures\r\n model_eval_pool = ['MLP', 'ConvNet', 'LeNet', 'AlexNet', 'VGG11', 'ResNet18']\r\n elif eval_mode == 'B': # multiple architectures with BatchNorm for DM experiments\r\n model_eval_pool = ['ConvNetBN', 'ConvNetASwishBN', 'AlexNetBN', 'VGG11BN', 'ResNet18BN']\r\n elif eval_mode == 'W': # ablation study on network width\r\n model_eval_pool = ['ConvNetW32', 'ConvNetW64', 'ConvNetW128', 'ConvNetW256']\r\n elif eval_mode == 'D': # ablation study on network depth\r\n model_eval_pool = ['ConvNetD1', 'ConvNetD2', 'ConvNetD3', 'ConvNetD4']\r\n elif eval_mode == 'A': # ablation study on network activation function\r\n model_eval_pool = ['ConvNetAS', 'ConvNetAR', 'ConvNetAL', 'ConvNetASwish']\r\n elif eval_mode == 'P': # ablation study on network pooling layer\r\n model_eval_pool = ['ConvNetNP', 'ConvNetMP', 'ConvNetAP']\r\n elif eval_mode == 'N': # ablation study on network normalization layer\r\n model_eval_pool = ['ConvNetNN', 'ConvNetBN', 'ConvNetLN', 'ConvNetIN', 'ConvNetGN']\r\n elif eval_mode == 'S': # itself\r\n if 'BN' in model:\r\n print('Attention: Here I will replace BN with IN in evaluation, as the synthetic set is too small to measure BN hyper-parameters.')\r\n model_eval_pool = [model[:model.index('BN')]] if 'BN' in model else [model]\r\n elif eval_mode == 'SS': # itself\r\n model_eval_pool = [model]\r\n else:\r\n model_eval_pool = [model_eval]\r\n return model_eval_pool\r" }, { "identifier": "evaluate_synset", "path": "DatasetCondensation/utils.py", "snippet": "def evaluate_synset(it_eval, net, images_train, labels_train, testloader, args):\r\n net = net.to(args.device)\r\n images_train = images_train.to(args.device)\r\n labels_train = labels_train.to(args.device)\r\n lr = float(args.lr_net)\r\n Epoch = int(args.epoch_eval_train)\r\n lr_schedule = [Epoch//2+1]\r\n optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005)\r\n criterion = nn.CrossEntropyLoss().to(args.device)\r\n\r\n dst_train = TensorDataset(images_train, labels_train)\r\n trainloader = torch.utils.data.DataLoader(dst_train, batch_size=args.batch_train, shuffle=True, num_workers=0)\r\n\r\n start = time.time()\r\n for ep in range(Epoch+1):\r\n loss_train, acc_train = epoch('train', trainloader, net, optimizer, criterion, args, aug = True)\r\n if ep in lr_schedule:\r\n lr *= 0.1\r\n optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005)\r\n\r\n time_train = time.time() - start\r\n loss_test, acc_test = epoch('test', testloader, net, optimizer, criterion, args, aug = False)\r\n print('%s Evaluate_%02d: epoch = %04d train time = %d s train loss = %.6f train acc = %.4f, test acc = %.4f' % (get_time(), it_eval, Epoch, int(time_train), loss_train, acc_train, acc_test))\r\n\r\n return net, acc_train, acc_test\r" }, { "identifier": "get_daparam", "path": "DatasetCondensation/utils.py", "snippet": "def get_daparam(dataset, model, model_eval, ipc):\r\n # We find that augmentation doesn't always benefit the performance.\r\n # So we do augmentation for some of the settings.\r\n\r\n dc_aug_param = dict()\r\n dc_aug_param['crop'] = 4\r\n dc_aug_param['scale'] = 0.2\r\n dc_aug_param['rotate'] = 45\r\n dc_aug_param['noise'] = 0.001\r\n dc_aug_param['strategy'] = 'none'\r\n\r\n if dataset == 'MNIST':\r\n dc_aug_param['strategy'] = 'crop_scale_rotate'\r\n\r\n if model_eval in ['ConvNetBN']: # Data augmentation makes model training 
with Batch Norm layer easier.\r\n dc_aug_param['strategy'] = 'crop_noise'\r\n\r\n return dc_aug_param\r" }, { "identifier": "match_loss", "path": "DatasetCondensation/utils.py", "snippet": "def match_loss(gw_syn, gw_real, args):\r\n dis = torch.tensor(0.0).to(args.device)\r\n\r\n if args.dis_metric == 'ours':\r\n for ig in range(len(gw_real)):\r\n gwr = gw_real[ig]\r\n gws = gw_syn[ig]\r\n dis += distance_wb(gwr, gws)\r\n\r\n elif args.dis_metric == 'mse':\r\n gw_real_vec = []\r\n gw_syn_vec = []\r\n for ig in range(len(gw_real)):\r\n gw_real_vec.append(gw_real[ig].reshape((-1)))\r\n gw_syn_vec.append(gw_syn[ig].reshape((-1)))\r\n gw_real_vec = torch.cat(gw_real_vec, dim=0)\r\n gw_syn_vec = torch.cat(gw_syn_vec, dim=0)\r\n dis = torch.sum((gw_syn_vec - gw_real_vec)**2)\r\n\r\n elif args.dis_metric == 'cos':\r\n gw_real_vec = []\r\n gw_syn_vec = []\r\n for ig in range(len(gw_real)):\r\n gw_real_vec.append(gw_real[ig].reshape((-1)))\r\n gw_syn_vec.append(gw_syn[ig].reshape((-1)))\r\n gw_real_vec = torch.cat(gw_real_vec, dim=0)\r\n gw_syn_vec = torch.cat(gw_syn_vec, dim=0)\r\n dis = 1 - torch.sum(gw_real_vec * gw_syn_vec, dim=-1) / (torch.norm(gw_real_vec, dim=-1) * torch.norm(gw_syn_vec, dim=-1) + 0.000001)\r\n\r\n else:\r\n exit('unknown distance function: %s'%args.dis_metric)\r\n\r\n return dis\r" }, { "identifier": "get_time", "path": "DatasetCondensation/utils.py", "snippet": "def get_time():\r\n return str(time.strftime(\"[%Y-%m-%d %H:%M:%S]\", time.localtime()))\r" }, { "identifier": "TensorDataset", "path": "DatasetCondensation/utils.py", "snippet": "class TensorDataset(Dataset):\r\n def __init__(self, images, labels): # images: n x c x h x w tensor\r\n self.images = images.detach().float()\r\n self.labels = labels.detach()\r\n\r\n def __getitem__(self, index):\r\n return self.images[index], self.labels[index]\r\n\r\n def __len__(self):\r\n return self.images.shape[0]\r" }, { "identifier": "epoch", "path": "DatasetCondensation/utils.py", "snippet": "def epoch(mode, dataloader, net, optimizer, criterion, args, aug):\r\n loss_avg, acc_avg, num_exp = 0, 0, 0\r\n net = net.to(args.device)\r\n criterion = criterion.to(args.device)\r\n\r\n if mode == 'train':\r\n net.train()\r\n else:\r\n net.eval()\r\n\r\n for i_batch, datum in enumerate(dataloader):\r\n img = datum[0].float().to(args.device)\r\n if aug:\r\n if args.dsa:\r\n img = DiffAugment(img, args.dsa_strategy, param=args.dsa_param)\r\n else:\r\n img = augment(img, args.dc_aug_param, device=args.device)\r\n lab = datum[1].long().to(args.device)\r\n n_b = lab.shape[0]\r\n\r\n output = net(img)\r\n loss = criterion(output, lab)\r\n acc = np.sum(np.equal(np.argmax(output.cpu().data.numpy(), axis=-1), lab.cpu().data.numpy()))\r\n\r\n loss_avg += loss.item()*n_b\r\n acc_avg += acc\r\n num_exp += n_b\r\n\r\n if mode == 'train':\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n loss_avg /= num_exp\r\n acc_avg /= num_exp\r\n\r\n return loss_avg, acc_avg\r" }, { "identifier": "DiffAugment", "path": "DatasetCondensation/utils.py", "snippet": "def DiffAugment(x, strategy='', seed = -1, param = None):\r\n if strategy == 'None' or strategy == 'none' or strategy == '':\r\n return x\r\n\r\n if seed == -1:\r\n param.Siamese = False\r\n else:\r\n param.Siamese = True\r\n\r\n param.latestseed = seed\r\n\r\n if strategy:\r\n if param.aug_mode == 'M': # original\r\n for p in strategy.split('_'):\r\n for f in AUGMENT_FNS[p]:\r\n x = f(x, param)\r\n elif param.aug_mode == 'S':\r\n pbties = strategy.split('_')\r\n 
set_seed_DiffAug(param)\r\n p = pbties[torch.randint(0, len(pbties), size=(1,)).item()]\r\n for f in AUGMENT_FNS[p]:\r\n x = f(x, param)\r\n else:\r\n exit('unknown augmentation mode: %s'%param.aug_mode)\r\n x = x.contiguous()\r\n return x\r" }, { "identifier": "ParamDiffAug", "path": "DatasetCondensation/utils.py", "snippet": "class ParamDiffAug():\r\n def __init__(self):\r\n self.aug_mode = 'S' #'multiple or single'\r\n self.prob_flip = 0.5\r\n self.ratio_scale = 1.2\r\n self.ratio_rotate = 15.0\r\n self.ratio_crop_pad = 0.125\r\n self.ratio_cutout = 0.5 # the size would be 0.5x0.5\r\n self.brightness = 1.0\r\n self.saturation = 2.0\r\n self.contrast = 0.5\r" }, { "identifier": "drop_samples", "path": "drop_utils/drop.py", "snippet": "def drop_samples(images_all, labels_all, indices_class,\n dataset: str, drop_criterion: str, \n *, drop_ratio=None, keep_ratio=None):\n \"\"\"images_all, labels_all, indices_class: the dataset structure that commonly used for DD\n dataset: (str) dataset name\n drop_criterion: (str) =`random`, or in the format of ${utility-indicator}_${order}, e.g. LossConverge_Small\n drop_ratio, keep_ratio: only one of them should be specified (drop_ratio = 1.0 - keep_ratio)\n \"\"\"\n assert (drop_ratio is None) ^ (keep_ratio is None), \\\n f\"Only one of drop_ratio ({drop_ratio}) and keep_ratio ({keep_ratio}) should be specified.\"\n \n if drop_ratio is None:\n assert keep_ratio is not None, \"I know keep_ratio must have value here! I'm muting the warning in my way.\"\n drop_ratio = 1.0 - keep_ratio\n assert 0.0 <= drop_ratio <= 1.0, str(drop_ratio)\n\n # Here's the tricky part: remember that in any case, the samples we hope to drop is sorted to the left\n # of the sequence, so we keep the `keep_ratio`% samples at right, \n # i.e. we keep the range [drop_ratio, 100%]\n \n dropped_idx_set = sample_indices_to_drop(dataset, drop_criterion, indices_class, drop_ratio, 1.0)\n\n\n # re-indexing\n \n images_all = [x for i, x in enumerate(images_all) if i not in dropped_idx_set]\n print(\"Original:\", labels_all.shape[0], \"; Now:\", len(images_all), \"remain\")\n labels_all = [x for i, x in enumerate(labels_all) if i not in dropped_idx_set]\n\n indices_class = [[] for c in range(len(indices_class))]\n for i, lab in enumerate(labels_all):\n indices_class[lab].append(i)\n\n # for i, x in enumerate(indices_class):\n # print(\"Class\", i, \"remains\", len(x), \"samples\")\n\n images_all = torch.stack(images_all, dim=0)\n labels_all = torch.tensor(labels_all, dtype=torch.long, device=images_all.device)\n torch.cuda.empty_cache()\n\n return images_all, labels_all, indices_class" } ]
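The `match_loss` snippet above offers three gradient-distance metrics; its 'cos' branch flattens and concatenates all gradient tensors and returns one minus their cosine similarity. A standalone check of that computation on toy tensors, assuming only PyTorch; it mirrors the quoted branch rather than calling the repository's function.

import torch


def cosine_gradient_distance(gw_syn: list, gw_real: list) -> torch.Tensor:
    """1 - cos(flattened synthetic grads, flattened real grads), as in the 'cos' branch."""
    syn_vec = torch.cat([g.reshape(-1) for g in gw_syn], dim=0)
    real_vec = torch.cat([g.reshape(-1) for g in gw_real], dim=0)
    return 1 - torch.sum(real_vec * syn_vec) / (torch.norm(real_vec) * torch.norm(syn_vec) + 0.000001)


# Identical gradients give a distance near 0, opposite gradients a distance near 2
g_real = [torch.tensor([1.0, 2.0]), torch.tensor([[3.0], [4.0]])]
print(cosine_gradient_distance(g_real, g_real).item())                # ~0.0
print(cosine_gradient_distance([-g for g in g_real], g_real).item())  # ~2.0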
import os import time import copy import argparse import numpy as np import torch import torch.nn as nn import pdb from torchvision.utils import save_image from .utils import get_loops, get_dataset, get_network, get_eval_pool, evaluate_synset, get_daparam, match_loss, get_time, TensorDataset, epoch, DiffAugment, ParamDiffAug from drop_utils import drop_samples
7,780
parser.add_argument('--method', type=str, default='DC', help='DC/DSA') parser.add_argument('--dataset', type=str, default='CIFAR10', help='dataset') parser.add_argument('--model', type=str, default='ConvNet', help='model') parser.add_argument('--ipc', type=int, default=1, help='image(s) per class') parser.add_argument('--eval_mode', type=str, default='S', help='eval_mode') # S: the same to training model, M: multi architectures, W: net width, D: net depth, A: activation function, P: pooling layer, N: normalization layer, parser.add_argument('--num_exp', type=int, default=5, help='the number of experiments') parser.add_argument('--num_eval', type=int, default=20, help='the number of evaluating randomly initialized models') parser.add_argument('--epoch_eval_train', type=int, default=300, help='epochs to train a model with synthetic data') parser.add_argument('--Iteration', type=int, default=1000, help='training iterations') parser.add_argument('--lr_img', type=float, default=0.1, help='learning rate for updating synthetic images') parser.add_argument('--lr_net', type=float, default=0.01, help='learning rate for updating network parameters') parser.add_argument('--batch_real', type=int, default=256, help='batch size for real data') parser.add_argument('--batch_train', type=int, default=256, help='batch size for training networks') parser.add_argument('--init', type=str, default='noise', help='noise/real: initialize synthetic images from random noise or randomly sampled real images.') parser.add_argument('--dsa_strategy', type=str, default='None', help='differentiable Siamese augmentation strategy') parser.add_argument('--data_path', type=str, default='data', help='dataset path') parser.add_argument('--save_path', type=str, default='result', help='path to save results') parser.add_argument('--dis_metric', type=str, default='ours', help='distance metric') parser.add_argument('--drop_criterion', type=str, default='LossConverge_large', help='Criterion for data dropping') parser.add_argument('--drop_ratio', type=float, default=0.0, help='The ratio to drop (for each class)') args = parser.parse_args() args.outer_loop, args.inner_loop = get_loops(args.ipc) args.device = 'cuda' if torch.cuda.is_available() else 'cpu' args.dsa_param = ParamDiffAug() args.dsa = True if args.method == 'DSA' else False if not os.path.exists(args.data_path): os.mkdir(args.data_path) if not os.path.exists(args.save_path): os.mkdir(args.save_path) eval_it_pool = np.arange(0, args.Iteration+1, 500).tolist() if args.eval_mode == 'S' or args.eval_mode == 'SS' else [args.Iteration] # The list of iterations when we evaluate models and record results. 
print('eval_it_pool: ', eval_it_pool) channel, im_size, num_classes, class_names, mean, std, dst_train, dst_test, testloader = get_dataset(args.dataset, args.data_path) model_eval_pool = get_eval_pool(args.eval_mode, args.model, args.model) accs_all_exps = dict() # record performances of all experiments for key in model_eval_pool: accs_all_exps[key] = [] data_save = [] for exp in range(args.num_exp): print('\n================== Exp %d ==================\n '%exp) print('Hyper-parameters: \n', args.__dict__) print('Evaluation model pool: ', model_eval_pool) ''' organize the real dataset ''' images_all = [] labels_all = [] indices_class = [[] for c in range(num_classes)] images_all = [torch.unsqueeze(dst_train[i][0], dim=0) for i in range(len(dst_train))] labels_all = [dst_train[i][1] for i in range(len(dst_train))] for i, lab in enumerate(labels_all): indices_class[lab].append(i) images_all = torch.cat(images_all, dim=0).to(args.device) labels_all = torch.tensor(labels_all, dtype=torch.long, device=args.device) images_all, labels_all, indices_class = drop_samples( images_all, labels_all, indices_class, args.dataset, args.drop_criterion, drop_ratio=args.drop_ratio) for c in range(num_classes): print('class c = %d: %d real images'%(c, len(indices_class[c]))) def get_images(c, n): # get random n images from class c idx_shuffle = np.random.permutation(indices_class[c])[:n] return images_all[idx_shuffle] for ch in range(channel): print('real images channel %d, mean = %.4f, std = %.4f'%(ch, torch.mean(images_all[:, ch]), torch.std(images_all[:, ch]))) ''' initialize the synthetic data ''' image_syn = torch.randn(size=(num_classes*args.ipc, channel, im_size[0], im_size[1]), dtype=torch.float, requires_grad=True, device=args.device) label_syn = torch.tensor([np.ones(args.ipc)*i for i in range(num_classes)], dtype=torch.long, requires_grad=False, device=args.device).view(-1) # [0,0,0, 1,1,1, ..., 9,9,9] if args.init == 'real': print('initialize synthetic data from random real images') for c in range(num_classes): image_syn.data[c*args.ipc:(c+1)*args.ipc] = get_images(c, args.ipc).detach().data else: print('initialize synthetic data from random noise') ''' training ''' optimizer_img = torch.optim.SGD([image_syn, ], lr=args.lr_img, momentum=0.5) # optimizer_img for synthetic data optimizer_img.zero_grad() criterion = nn.CrossEntropyLoss().to(args.device) print('%s training begins'%get_time()) for it in range(args.Iteration+1): ''' Evaluate synthetic data ''' if it in eval_it_pool: for model_eval in model_eval_pool: print('-------------------------\nEvaluation\nmodel_train = %s, model_eval = %s, iteration = %d'%(args.model, model_eval, it)) if args.dsa: args.epoch_eval_train = 1000 args.dc_aug_param = None print('DSA augmentation strategy: \n', args.dsa_strategy) print('DSA augmentation parameters: \n', args.dsa_param.__dict__) else: args.dc_aug_param = get_daparam(args.dataset, args.model, model_eval, args.ipc) # This augmentation parameter set is only for DC method. It will be muted when args.dsa is True. print('DC augmentation parameters: \n', args.dc_aug_param) if args.dsa or args.dc_aug_param['strategy'] != 'none': args.epoch_eval_train = 1000 # Training with data augmentation needs more epochs. else: args.epoch_eval_train = 300 accs = [] for it_eval in range(args.num_eval):
def main(): parser = argparse.ArgumentParser(description='Parameter Processing') parser.add_argument('--method', type=str, default='DC', help='DC/DSA') parser.add_argument('--dataset', type=str, default='CIFAR10', help='dataset') parser.add_argument('--model', type=str, default='ConvNet', help='model') parser.add_argument('--ipc', type=int, default=1, help='image(s) per class') parser.add_argument('--eval_mode', type=str, default='S', help='eval_mode') # S: the same to training model, M: multi architectures, W: net width, D: net depth, A: activation function, P: pooling layer, N: normalization layer, parser.add_argument('--num_exp', type=int, default=5, help='the number of experiments') parser.add_argument('--num_eval', type=int, default=20, help='the number of evaluating randomly initialized models') parser.add_argument('--epoch_eval_train', type=int, default=300, help='epochs to train a model with synthetic data') parser.add_argument('--Iteration', type=int, default=1000, help='training iterations') parser.add_argument('--lr_img', type=float, default=0.1, help='learning rate for updating synthetic images') parser.add_argument('--lr_net', type=float, default=0.01, help='learning rate for updating network parameters') parser.add_argument('--batch_real', type=int, default=256, help='batch size for real data') parser.add_argument('--batch_train', type=int, default=256, help='batch size for training networks') parser.add_argument('--init', type=str, default='noise', help='noise/real: initialize synthetic images from random noise or randomly sampled real images.') parser.add_argument('--dsa_strategy', type=str, default='None', help='differentiable Siamese augmentation strategy') parser.add_argument('--data_path', type=str, default='data', help='dataset path') parser.add_argument('--save_path', type=str, default='result', help='path to save results') parser.add_argument('--dis_metric', type=str, default='ours', help='distance metric') parser.add_argument('--drop_criterion', type=str, default='LossConverge_large', help='Criterion for data dropping') parser.add_argument('--drop_ratio', type=float, default=0.0, help='The ratio to drop (for each class)') args = parser.parse_args() args.outer_loop, args.inner_loop = get_loops(args.ipc) args.device = 'cuda' if torch.cuda.is_available() else 'cpu' args.dsa_param = ParamDiffAug() args.dsa = True if args.method == 'DSA' else False if not os.path.exists(args.data_path): os.mkdir(args.data_path) if not os.path.exists(args.save_path): os.mkdir(args.save_path) eval_it_pool = np.arange(0, args.Iteration+1, 500).tolist() if args.eval_mode == 'S' or args.eval_mode == 'SS' else [args.Iteration] # The list of iterations when we evaluate models and record results. 
print('eval_it_pool: ', eval_it_pool) channel, im_size, num_classes, class_names, mean, std, dst_train, dst_test, testloader = get_dataset(args.dataset, args.data_path) model_eval_pool = get_eval_pool(args.eval_mode, args.model, args.model) accs_all_exps = dict() # record performances of all experiments for key in model_eval_pool: accs_all_exps[key] = [] data_save = [] for exp in range(args.num_exp): print('\n================== Exp %d ==================\n '%exp) print('Hyper-parameters: \n', args.__dict__) print('Evaluation model pool: ', model_eval_pool) ''' organize the real dataset ''' images_all = [] labels_all = [] indices_class = [[] for c in range(num_classes)] images_all = [torch.unsqueeze(dst_train[i][0], dim=0) for i in range(len(dst_train))] labels_all = [dst_train[i][1] for i in range(len(dst_train))] for i, lab in enumerate(labels_all): indices_class[lab].append(i) images_all = torch.cat(images_all, dim=0).to(args.device) labels_all = torch.tensor(labels_all, dtype=torch.long, device=args.device) images_all, labels_all, indices_class = drop_samples( images_all, labels_all, indices_class, args.dataset, args.drop_criterion, drop_ratio=args.drop_ratio) for c in range(num_classes): print('class c = %d: %d real images'%(c, len(indices_class[c]))) def get_images(c, n): # get random n images from class c idx_shuffle = np.random.permutation(indices_class[c])[:n] return images_all[idx_shuffle] for ch in range(channel): print('real images channel %d, mean = %.4f, std = %.4f'%(ch, torch.mean(images_all[:, ch]), torch.std(images_all[:, ch]))) ''' initialize the synthetic data ''' image_syn = torch.randn(size=(num_classes*args.ipc, channel, im_size[0], im_size[1]), dtype=torch.float, requires_grad=True, device=args.device) label_syn = torch.tensor([np.ones(args.ipc)*i for i in range(num_classes)], dtype=torch.long, requires_grad=False, device=args.device).view(-1) # [0,0,0, 1,1,1, ..., 9,9,9] if args.init == 'real': print('initialize synthetic data from random real images') for c in range(num_classes): image_syn.data[c*args.ipc:(c+1)*args.ipc] = get_images(c, args.ipc).detach().data else: print('initialize synthetic data from random noise') ''' training ''' optimizer_img = torch.optim.SGD([image_syn, ], lr=args.lr_img, momentum=0.5) # optimizer_img for synthetic data optimizer_img.zero_grad() criterion = nn.CrossEntropyLoss().to(args.device) print('%s training begins'%get_time()) for it in range(args.Iteration+1): ''' Evaluate synthetic data ''' if it in eval_it_pool: for model_eval in model_eval_pool: print('-------------------------\nEvaluation\nmodel_train = %s, model_eval = %s, iteration = %d'%(args.model, model_eval, it)) if args.dsa: args.epoch_eval_train = 1000 args.dc_aug_param = None print('DSA augmentation strategy: \n', args.dsa_strategy) print('DSA augmentation parameters: \n', args.dsa_param.__dict__) else: args.dc_aug_param = get_daparam(args.dataset, args.model, model_eval, args.ipc) # This augmentation parameter set is only for DC method. It will be muted when args.dsa is True. print('DC augmentation parameters: \n', args.dc_aug_param) if args.dsa or args.dc_aug_param['strategy'] != 'none': args.epoch_eval_train = 1000 # Training with data augmentation needs more epochs. else: args.epoch_eval_train = 300 accs = [] for it_eval in range(args.num_eval):
net_eval = get_network(model_eval, channel, num_classes, im_size).to(args.device) # get a random model
2
2023-11-03 09:34:15+00:00
12k
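The quoted code for the record above stops just as the per-iteration evaluation begins, so the gradient-matching update that gives dataset condensation its name is not shown. A tiny self-contained illustration of that core step, assuming only PyTorch; the toy model, the shapes, and the plain MSE gradient distance are stand-ins for the repository's ConvNet and its `match_loss` metrics, not the file's actual training loop.

import torch
import torch.nn as nn

torch.manual_seed(0)

# Toy stand-ins: 8 real images and 2 learnable synthetic images of a single class
real = torch.randn(8, 1, 8, 8)
syn = torch.randn(2, 1, 8, 8, requires_grad=True)
labels_real = torch.zeros(8, dtype=torch.long)
labels_syn = torch.zeros(2, dtype=torch.long)

net = nn.Sequential(nn.Flatten(), nn.Linear(64, 10))
criterion = nn.CrossEntropyLoss()
optimizer_img = torch.optim.SGD([syn], lr=0.1, momentum=0.5)

# Gradients of the network loss w.r.t. the network parameters, on real and on synthetic data
gw_real = [g.detach() for g in torch.autograd.grad(criterion(net(real), labels_real), net.parameters())]
gw_syn = torch.autograd.grad(criterion(net(syn), labels_syn), net.parameters(), create_graph=True)

# Plain MSE distance between the two gradient sets (the repo's match_loss offers 'ours'/'mse'/'cos')
loss = sum(((gs - gr) ** 2).sum() for gs, gr in zip(gw_syn, gw_real))

optimizer_img.zero_grad()
loss.backward()       # create_graph=True lets the gradient flow back into the synthetic images
optimizer_img.step()  # one update step on the synthetic images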
gchada/ROAM
real/rail_walker_interface/environment/joystick_real.py
[ { "identifier": "WalkerEnvironment", "path": "real/rail_walker_interface/environment/env.py", "snippet": "class WalkerEnvironment:\n @property\n def robot(self) -> BaseWalker:\n pass" }, { "identifier": "JoystickEnvironment", "path": "real/rail_walker_interface/environment/env.py", "snippet": "class JoystickEnvironment:\n @property\n def joystick_policy(self) -> JoystickPolicy:\n pass\n\n def set_joystick_policy(self, joystick_policy: JoystickPolicy):\n pass\n\n @property\n def is_resetter_policy(self) -> bool:\n return False" }, { "identifier": "BaseWalker", "path": "real/rail_walker_interface/robot/robot.py", "snippet": "class BaseWalker(Generic[_ObsT]):\n def __init__(\n self, \n name: Optional[str] = \"robot\", \n Kp: float = 5,\n Kd: float = 1,\n force_real_control_timestep : bool = False,\n limit_action_range : float = 1.0,\n power_protect_factor : float = 0.1\n ):\n assert limit_action_range > 0 and limit_action_range <= 1.0\n self.name = name\n self.Kp = Kp\n self.Kd = Kd\n self.force_real_control_timestep = force_real_control_timestep\n self._last_control_t = 0.0\n self.limit_action_range = limit_action_range\n self._power_protect_factor = power_protect_factor\n\n @property\n def is_real_robot(self) -> bool:\n return False\n\n @property\n def power_protect_factor(self) -> float:\n return self._power_protect_factor\n \n @power_protect_factor.setter\n def power_protect_factor(self, value: float) -> None:\n assert value >= 0 and value <= 1.0\n self._power_protect_factor = value\n\n \"\"\"\n The control_timestep is the time interval between two consecutive model control actions.\n \"\"\"\n @property\n def control_timestep(self) -> float:\n pass\n \n @property\n def action_interpolation(self) -> bool:\n pass\n\n \"\"\"\n The control_subtimestep is the time interval between two consecutive internal control actions. 
It will also be the physics timestep if in simulation.\n \"\"\"\n @property\n def control_subtimestep(self) -> float:\n pass\n\n def receive_observation(self) -> bool:\n pass\n\n @property\n def joint_qpos_init(self) -> np.ndarray:\n pass\n\n @property\n def joint_qpos_sitting(self) -> np.ndarray:\n pass\n\n @cached_property\n def joint_qpos_crouch(self) -> np.ndarray:\n return (self.joint_qpos_init + self.joint_qpos_sitting) / 2.0\n\n \"\"\"\n This property will be used to determine the standing range of qpos of the robot.\n \"\"\"\n @property\n def joint_qpos_offset(self) -> np.ndarray:\n pass\n\n @property\n def joint_qpos_mins(self) -> np.ndarray:\n pass\n\n @property\n def joint_qpos_maxs(self) -> np.ndarray:\n pass\n\n def reset(self) -> None:\n pass\n\n def get_3d_linear_velocity(self) -> np.ndarray:\n pass\n\n def get_3d_local_velocity(self) -> np.ndarray:\n pass\n\n def get_3d_angular_velocity(self) -> np.ndarray:\n pass\n\n def get_framequat_wijk(self) -> np.ndarray:\n pass\n\n def get_roll_pitch_yaw(self) -> np.ndarray:\n pass\n\n def get_last_observation(self) -> Optional[_ObsT]:\n pass\n\n def get_3d_acceleration_local(self) -> np.ndarray:\n pass\n\n def get_joint_qpos(self) -> np.ndarray:\n pass\n\n def get_joint_qvel(self) -> np.ndarray:\n pass\n\n def get_joint_qacc(self) -> np.ndarray:\n pass\n\n def get_joint_torques(self) -> np.ndarray:\n pass\n\n def _apply_action(self, action: np.ndarray) -> bool:\n pass\n\n def close(self) -> None:\n pass\n\n def __del__(self):\n self.close()\n \n @property\n def action_qpos_mins(self) -> np.ndarray:\n # delta = -np.minimum(np.abs(self.joint_qpos_mins - self.joint_qpos_init), np.abs(self.joint_qpos_maxs - self.joint_qpos_init))\n # return delta * self.limit_action_range + self.joint_qpos_init\n return (self.joint_qpos_mins - self.joint_qpos_init) * self.limit_action_range + self.joint_qpos_init\n \n @property\n def action_qpos_maxs(self) -> np.ndarray:\n # delta = np.minimum(np.abs(self.joint_qpos_mins - self.joint_qpos_init), np.abs(self.joint_qpos_maxs - self.joint_qpos_init))\n # return delta * self.limit_action_range + self.joint_qpos_init\n return (self.joint_qpos_maxs - self.joint_qpos_init) * self.limit_action_range + self.joint_qpos_init\n\n def apply_action(self, action: np.ndarray) -> bool:\n action = np.clip(action, self.action_qpos_mins, self.action_qpos_maxs)\n \n if not self.force_real_control_timestep:\n return self._apply_action(action)\n else:\n t = time.time()\n dt = t - self._last_control_t\n if dt >= self.control_timestep:\n self._last_control_t = t\n return self._apply_action(action)\n else:\n time_to_sleep = self.control_timestep - dt\n time.sleep(time_to_sleep)\n self._last_control_t = t + time_to_sleep\n return self._apply_action(action)\n\n def can_apply_action(self) -> bool:\n t = time.time()\n dt = t - self._last_control_t\n if (not self.force_real_control_timestep) or dt >= self.control_timestep:\n return True\n else:\n return False\n\n def async_apply_action(self, action: np.ndarray) -> bool:\n if self.can_apply_action():\n self._last_control_t = time.time()\n return self._apply_action(action)\n else:\n return False\n\n @cached_property\n def joint_nums(self) -> int:\n return len(self.joint_qpos_init)\n \n @cached_property\n def action_spec(self) -> gym.spaces.Box:\n return gym.spaces.Box(\n low=self.joint_qpos_mins, \n high=self.joint_qpos_maxs, \n shape=(self.joint_nums,),\n dtype=np.float32\n )\n\n def unwrapped(self):\n return self" }, { "identifier": "BaseWalkerWithFootContact", "path": 
"real/rail_walker_interface/robot/robot.py", "snippet": "class BaseWalkerWithFootContact:\n def get_foot_contact(self) -> np.ndarray:\n pass\n\n def get_foot_force(self) -> np.ndarray:\n pass\n\n def get_foot_force_norm(self) -> np.ndarray:\n pass" }, { "identifier": "JoystickPolicy", "path": "real/rail_walker_interface/joystick_policy/joystick_policy.py", "snippet": "class JoystickPolicy:\n def __init__(\n self,\n robot: BaseWalker,\n reward_provider: JoystickPolicyRewardProvider,\n target_yaw_provider: JoystickPolicyTargetProvider,\n termination_providers: list[JoystickPolicyTerminationConditionProvider],\n truncation_providers: list[JoystickPolicyTerminationConditionProvider],\n resetters: list[JoystickPolicyResetter],\n initializers: list[JoystickPolicyResetter] = [],\n target_observable: Optional[JoystickPolicyTargetObservable] = None,\n enabled_observables : list[str] = [\n \"joints_pos\",\n \"joints_vel\",\n \"imu\",\n \"sensors_local_velocimeter\",\n \"torques\",\n \"foot_contacts\",\n ],\n lock_target: bool = False,\n enable_target_custom_obs = True\n ):\n self.robot = robot\n self.reward_provider = reward_provider\n self.target_yaw_provider = target_yaw_provider\n self.termination_providers = termination_providers\n self.truncation_providers = truncation_providers\n self.resetters = resetters\n self.initializers = initializers\n self.target_observable = target_observable\n self.enabled_observables = enabled_observables\n self.lock_target = lock_target\n self.enable_target_custom_obs = enable_target_custom_obs\n\n # Temporary Variables\n self._step_target_qpos = self.robot.get_joint_qpos()\n\n # Set up task-specific variables\n self._target_goal_world_delta = np.zeros(2)\n self._target_goal_local = np.zeros(2)\n self._target_yaw = 0.0\n self._target_delta_yaw = 0.0\n self._target_velocity = 0.0\n self._target_custom_data = None\n self._rew_step = 0.0\n self._info_dict = {}\n self._has_after_after_step = False\n self._termination_reason : Optional[JoystickPolicyTerminationConditionProvider] = None\n self._truncation_reason : Optional[JoystickPolicyTerminationConditionProvider] = None\n self._inited = False\n \n @property\n def has_after_after_step(self) -> bool:\n return self._has_after_after_step\n\n @property\n def control_timestep(self) -> float:\n return self.robot.control_timestep\n\n @control_timestep.setter\n def control_timestep(self, value: float) -> None:\n self.robot.control_timestep = value\n \n @property\n def last_info(self) -> dict[str,Any]:\n return self._info_dict.copy()\n\n @property\n def control_subtimestep(self) -> float:\n return self.robot.control_subtimestep\n \n @control_subtimestep.setter\n def control_subtimestep(self, value: float) -> None:\n self.robot.control_subtimestep = value\n\n @property\n def target_yaw(self) -> float:\n return self._target_yaw\n \n @property\n def target_delta_yaw(self) -> float:\n return self._target_delta_yaw\n\n @property\n def target_goal_world_delta(self) -> np.ndarray:\n return self._target_goal_world_delta.copy()\n \n @property\n def target_goal_local(self) -> np.ndarray:\n return self._target_goal_local.copy()\n \n @property\n def target_custom_data(self) -> Optional[Any]:\n return self._target_custom_data\n \n @property\n def target_goal_world_delta_unit(self) -> np.ndarray:\n norm_goal = np.linalg.norm(self._target_goal_world_delta)\n if norm_goal == 0.0:\n return np.zeros(2)\n else:\n return self._target_goal_world_delta / norm_goal\n \n @property\n def target_goal_local_unit(self) -> np.ndarray:\n norm_goal = 
np.linalg.norm(self._target_goal_local)\n if norm_goal == 0.0:\n return np.zeros(2)\n else:\n return self._target_goal_local / norm_goal\n \n\n def __update_target(self) -> float:\n new_target_goal_world_delta = self.target_yaw_provider.get_target_goal_world_delta(self.robot)[:2]\n new_target_velocity = self.target_yaw_provider.get_target_velocity(self.robot)\n _, _, yaw = self.robot.get_roll_pitch_yaw()\n inv_rotation_mat = np.array([\n [np.cos(yaw), np.sin(yaw)],\n [-np.sin(yaw), np.cos(yaw)]\n ])\n new_target_goal_local = inv_rotation_mat @ new_target_goal_world_delta\n\n new_target_yaw = np.arctan2(new_target_goal_world_delta[1], new_target_goal_world_delta[0]) if np.linalg.norm(new_target_goal_world_delta) > 0.0 else 0.0\n new_target_delta_yaw = normalize_rad(new_target_yaw - self.robot.get_roll_pitch_yaw()[2])\n change_in_abs_target_delta_yaw = self.__get_change_in_abs_target_delta_yaw()\n\n self._info_dict[\"target_yaw\"] = new_target_yaw\n self._info_dict[\"target_delta_yaw\"] = new_target_delta_yaw\n self._info_dict[\"target_goal_local_x\"] = new_target_goal_local[0]\n self._info_dict[\"target_goal_local_y\"] = new_target_goal_local[1]\n self._info_dict[\"target_goal_world_delta_x\"] = new_target_goal_world_delta[0]\n self._info_dict[\"target_goal_world_delta_y\"] = new_target_goal_world_delta[1]\n self._info_dict[\"change_in_abs_target_delta_yaw\"] = change_in_abs_target_delta_yaw\n self._info_dict[\"abs_target_delta_yaw\"] = np.abs(new_target_delta_yaw)\n self._info_dict[\"target_velocity\"] = new_target_velocity\n\n self._target_yaw = new_target_yaw\n self._target_delta_yaw = new_target_delta_yaw\n self._target_goal_local = new_target_goal_local\n self._target_goal_world_delta = new_target_goal_world_delta\n self._target_custom_data = self.target_yaw_provider.get_target_custom_data()\n self._target_velocity = new_target_velocity\n return change_in_abs_target_delta_yaw\n \n def __get_change_in_abs_target_delta_yaw(self) -> float:\n new_target_delta_yaw = normalize_rad(self.target_yaw - self.robot.get_roll_pitch_yaw()[2])\n change_in_abs_target_delta_yaw = np.abs(new_target_delta_yaw) - np.abs(self._target_delta_yaw)\n return change_in_abs_target_delta_yaw\n \n def before_step(\n self,\n action: np.ndarray,\n random_state : np.random.RandomState\n ):\n self._step_target_qpos = action\n self.robot.apply_action(action)\n\n def get_reward(\n self\n ):\n return self._rew_step\n\n def after_step(\n self,\n random_state : np.random.RandomState\n ) -> dict[str,Any]:\n self._info_dict = {}\n self.robot.receive_observation()\n\n # Update the target yaw\n self.target_yaw_provider.step_target(\n self.robot,\n self._info_dict,\n random_state\n )\n self._has_after_after_step = self.target_yaw_provider.has_target_changed()\n if not self.lock_target and self._has_after_after_step:\n change_in_abs_target_delta_yaw = self.after_after_step(\n random_state\n )\n else:\n change_in_abs_target_delta_yaw = self.__update_target()\n\n # Gather info about velocity\n robot_v = self.robot.get_3d_linear_velocity()\n robot_v_norm = np.linalg.norm(robot_v)\n robot_v_to_goal = np.dot(\n robot_v[:2], self.target_goal_world_delta_unit\n )\n robot_v_local = self.robot.get_3d_local_velocity()\n robot_rpy = self.robot.get_roll_pitch_yaw()\n self._info_dict[\"velocity_norm\"] = robot_v_norm\n self._info_dict[\"velocity_to_goal\"] = robot_v_to_goal\n self._info_dict[\"velocity_local_x\"] = robot_v_local[0]\n self._info_dict[\"velocity_local_y\"] = robot_v_local[1]\n self._info_dict[\"velocity_local_z\"] = 
robot_v_local[2]\n self._info_dict[\"roll\"] = robot_rpy[0]\n self._info_dict[\"pitch\"] = robot_rpy[1]\n self._info_dict[\"yaw\"] = robot_rpy[2]\n self._info_dict[\"joint_torques\"] = np.mean(np.abs(self.robot.get_joint_torques()))\n self._info_dict[\"joint_qvels\"] = np.mean(np.abs(self.robot.get_joint_qvel()))\n self._info_dict[\"joint_qaccs\"] = np.mean(np.abs(self.robot.get_joint_qacc()))\n self._info_dict[\"joint_velocities\"] = np.mean(np.abs(self.robot.get_joint_qvel()))\n if hasattr(self.robot, \"get_foot_force\"):\n foot_force : np.ndarray = self.robot.get_foot_force()\n if foot_force.shape == (4,):\n foot_force_names = [\"FR\", \"FL\", \"RR\", \"RL\"]\n else:\n foot_force_names = list(range(foot_force.shape[0]))\n for i in range(len(foot_force_names)):\n self._info_dict[\"foot_force_\" + foot_force_names[i]] = foot_force[i]\n\n self.reward_provider.step_reward(\n self.robot,\n self._step_target_qpos,\n self.target_goal_world_delta,\n self.target_goal_local,\n self.target_yaw,\n self.target_delta_yaw,\n self._target_velocity,\n robot_v_to_goal,\n change_in_abs_target_delta_yaw,\n self._target_custom_data,\n self.enable_target_custom_obs,\n self._info_dict,\n random_state\n )\n reward_perstep = self.reward_provider.get_reward()\n #assert reward_perstep is not None and reward_perstep != np.nan\n self._info_dict[\"reward_perstep\"] = reward_perstep\n self._rew_step = reward_perstep\n\n # Step the target yaw observable\n if self.target_observable is not None:\n self.target_observable.step_target_obs(\n self.robot,\n self.target_goal_world_delta,\n self.target_goal_local,\n self.target_yaw,\n self.target_delta_yaw,\n self._target_velocity,\n robot_v_to_goal,\n change_in_abs_target_delta_yaw,\n self._target_custom_data,\n self.enable_target_custom_obs,\n self._info_dict,\n random_state\n )\n \n # Step resetters\n for resetter in self.resetters:\n resetter.step_resetter(\n self.robot,\n self.target_goal_world_delta,\n self.target_goal_local,\n self.target_yaw,\n self.target_delta_yaw,\n self._target_velocity,\n robot_v_to_goal,\n change_in_abs_target_delta_yaw,\n self._target_custom_data,\n self.enable_target_custom_obs,\n self._info_dict,\n random_state\n )\n\n # Step termination providers\n for termination_provider in self.termination_providers:\n termination_provider.step_termination_condition(\n self.robot,\n self.target_goal_world_delta,\n self.target_goal_local,\n self.target_yaw,\n self.target_delta_yaw,\n self._target_velocity,\n robot_v_to_goal,\n change_in_abs_target_delta_yaw,\n self._target_custom_data,\n self.enable_target_custom_obs,\n self._info_dict,\n random_state\n )\n if termination_provider.should_terminate():\n # print(\"Termination provider\", termination_provider, \"terminated the episode\")\n self._termination_reason = termination_provider\n break\n \n # Step truncaiton providers\n for truncation_provider in self.truncation_providers:\n truncation_provider.step_termination_condition(\n self.robot,\n self.target_goal_world_delta,\n self.target_goal_local,\n self.target_yaw,\n self.target_delta_yaw,\n self._target_velocity,\n robot_v_to_goal,\n change_in_abs_target_delta_yaw,\n self._target_custom_data,\n self.enable_target_custom_obs,\n self._info_dict,\n random_state\n )\n if truncation_provider.should_terminate():\n # print(\"Truncation provider\", truncation_provider, \"truncated the episode\")\n self._truncation_reason = truncation_provider\n break\n\n return self._info_dict.copy()\n\n def after_after_step(\n self,\n random_state : np.random.RandomState\n ):\n 
if self._has_after_after_step:\n self.target_yaw_provider.after_step_target(\n self.robot,\n self._info_dict,\n random_state\n )\n change_in_abs_target_delta_yaw = self.__update_target()\n robot_v = self.robot.get_3d_linear_velocity()\n robot_v_to_goal = np.dot(\n robot_v[:2], self.target_goal_world_delta_unit\n )\n # Step the target yaw observable\n if self.target_observable is not None:\n self.target_observable.step_target_obs(\n self.robot,\n self.target_goal_world_delta,\n self.target_goal_local,\n self.target_yaw,\n self.target_delta_yaw,\n self._target_velocity,\n robot_v_to_goal,\n change_in_abs_target_delta_yaw,\n self._target_custom_data,\n self.enable_target_custom_obs,\n self._info_dict,\n random_state\n )\n \n # self.reward_provider.step_ex(\n # self.robot,\n # self.target_goal_world_delta,\n # self.target_goal_local,\n # self.target_yaw,\n # self.target_delta_yaw,\n # robot_v_to_goal,\n # change_in_abs_target_delta_yaw,\n # self._target_custom_data,\n # self.enable_target_custom_obs,\n # self._info_dict,\n # random_state\n # )\n # reward_perstep = self.reward_provider.get_reward()\n # #assert reward_perstep is not None and reward_perstep != np.nan\n # self._rew_step = reward_perstep\n\n self._has_after_after_step = False\n return change_in_abs_target_delta_yaw\n else:\n return 0.0\n \n def reset(self, random_state : np.random.RandomState) -> dict[str,Any]:\n self.robot.receive_observation()\n # Reset the info dict\n self._info_dict = {}\n\n # Reset the task-specific variables\n self._target_yaw = 0.0\n self._target_delta_yaw = 0.0\n self._has_after_after_step = False\n\n if not self._inited:\n self._inited = True\n for initializer in self.initializers:\n initializer.perform_reset(\n self.robot,\n self._info_dict,\n self._termination_reason,\n random_state\n )\n\n # call the resetters\n for resetter in self.resetters:\n resetter.perform_reset(\n self.robot,\n self._info_dict,\n self._termination_reason,\n random_state\n )\n\n # Reset the target yaw provider\n self.target_yaw_provider.reset_target(\n self.robot,\n self._info_dict,\n self._termination_reason,\n random_state\n )\n self.__update_target()\n\n # Reset target yaw obs\n if self.target_observable is not None:\n self.target_observable.reset_target_obs(\n self.robot,\n self.target_goal_world_delta,\n self.target_goal_local,\n self.target_yaw,\n self.target_delta_yaw,\n self._target_velocity,\n self._info_dict,\n self._target_custom_data,\n self.enable_target_custom_obs,\n self._termination_reason,\n random_state\n )\n\n # Reset reward provider\n self.reward_provider.reset_reward(\n self.robot,\n self._info_dict,\n self._termination_reason,\n random_state\n )\n\n # Reset termination providers\n for termination_provider in self.termination_providers:\n termination_provider.reset_termination_condition(\n self.robot,\n self._info_dict,\n self._termination_reason,\n random_state\n )\n \n # Reset truncation providers\n for truncation_provider in self.truncation_providers:\n truncation_provider.reset_termination_condition(\n self.robot,\n self._info_dict,\n self._termination_reason,\n random_state\n )\n \n self._termination_reason = None\n self._truncation_reason = None\n self._rew_step = 0.0\n\n # Reset the robot\n self.robot.reset()\n self.robot.receive_observation()\n\n for resetter in self.resetters:\n if hasattr(resetter, \"last_position\"):\n resetter.perform_reset(\n self.robot,\n self._info_dict,\n self._termination_reason,\n random_state\n )\n\n return self._info_dict.copy()\n \n def should_terminate(self) -> bool:\n 
return self._termination_reason is not None\n\n def should_truncate(self) -> bool:\n return self._truncation_reason is not None" } ]
import gym
import gym.spaces
import numpy as np
import copy
from .env import WalkerEnvironment, JoystickEnvironment
from ..robot import BaseWalker, BaseWalkerWithFootContact
from ..joystick_policy import JoystickPolicy
from functools import cached_property
from typing import Optional, Any
from collections import OrderedDict
7,487
ret_dict["robot/foot_forces"] = gym.spaces.Box( low=-np.inf, high=np.inf, shape=(4,), dtype=np.float32 ) ret_dict["robot/foot_forces_normalized"] = gym.spaces.Box( low=-np.inf, high=np.inf, shape=(4,), dtype=np.float32 ) ret_dict["robot/foot_forces_normalized_masked"] = gym.spaces.Box( low=-np.inf, high=np.inf, shape=(4,), dtype=np.float32 ) ret_dict["robot/foot_contacts"] = gym.spaces.Box( # should use MultiBinary but flatten() does not support having multibinary / box spaces in a Dict low=0, high=1, shape=(4,), dtype=np.float32 ) return gym.spaces.Dict(ret_dict) def extract_observation(self) -> dict[str,Any]: roll, pitch, yaw = self.env.robot.get_roll_pitch_yaw() dr, dp, dy = self.env.robot.get_3d_angular_velocity() imu = np.array([roll, pitch, dr, dp], dtype=np.float32) ret_dict = { "robot/joints_pos": self.env.robot.get_joint_qpos(), "robot/joints_vel": self.env.robot.get_joint_qvel(), "robot/imu": imu, "robot/sensors_gyro": self.env.robot.get_3d_angular_velocity(), "robot/sensors_framequat": self.env.robot.get_framequat_wijk(), "robot/torques": self.env.robot.get_joint_torques(), "robot/sensors_local_velocimeter": self.env.robot.get_3d_local_velocity(), "robot/sensors_local_velocimeter_x": self.env.robot.get_3d_local_velocity()[0:1], "robot/sensors_accelerometer": self.env.robot.get_3d_acceleration_local(), } if isinstance(self.env.robot.unwrapped(), BaseWalkerWithFootContact): ret_dict["robot/foot_forces"] = self.env.robot.get_foot_force() ret_dict["robot/foot_contacts"] = self.env.robot.get_foot_contact() if hasattr(self.env.robot, "foot_contact_no_contact_threshold") and hasattr(self.env.robot, "foot_contact_has_contact_threshold"): ret_dict["robot/foot_forces_normalized"] = (ret_dict["robot/foot_forces"] - self.env.robot.foot_contact_no_contact_threshold) / (self.env.robot.foot_contact_has_contact_threshold - self.env.robot.foot_contact_no_contact_threshold) else: ret_dict["robot/foot_forces_normalized"] = ret_dict["robot/foot_forces"] masked_foot_forces = ret_dict["robot/foot_forces_normalized"].copy() masked_foot_forces[-1] = 0.0 ret_dict["robot/foot_forces_normalized_masked"] = masked_foot_forces return ret_dict class JoystickEnvImpl(gym.Env[dict[str,Any],np.ndarray], WalkerEnvironment, JoystickEnvironment): metadata = { "render_modes": [] } def __init__( self, joystick_policy : JoystickPolicy ): gym.Env.__init__(self) WalkerEnvironment.__init__(self) JoystickEnvironment.__init__(self) # ====================== Store Parameters ====================== self._joystick_policy = joystick_policy self.obs_extractor = JoystickEnvObservationExtractor(self) self.random_state = np.random.RandomState() @property def action_space(self) -> gym.spaces.Box: return gym.spaces.Box( low=self.robot.action_qpos_mins, high=self.robot.action_qpos_maxs, dtype=np.float32 ) @property def observation_space(self) -> gym.spaces.Dict: robot_space = self.obs_extractor.observation_spec real_obs_space = {} for key, space in robot_space.items(): if key.startswith("robot/") and key[len("robot/"):] in self.joystick_policy.enabled_observables: real_obs_space[key] = space if self.joystick_policy.target_observable is not None: real_obs_space["target_obs"] = self.joystick_policy.target_observable.get_observation_spec() if not self.joystick_policy.target_yaw_provider.is_target_velocity_fixed(): real_obs_space["target_vel"] = gym.spaces.Box( low = 0.0, high = np.inf, shape=(1,) ) target_custom_data_spec = self.joystick_policy.target_yaw_provider.get_target_custom_data_observable_spec() if 
self.joystick_policy.enable_target_custom_obs and target_custom_data_spec is not None: real_obs_space["target_custom"] = target_custom_data_spec # Enforce order real_obs_space = OrderedDict(sorted(real_obs_space.items(), key=lambda t: t[0])) obs_space = gym.spaces.Dict(real_obs_space) return obs_space @property def joystick_policy(self) -> JoystickPolicy: return self._joystick_policy def set_joystick_policy(self, joystick_policy: JoystickPolicy): self._joystick_policy = joystick_policy @property def is_resetter_policy(self) -> bool: return False @property
class JoystickEnvObservationExtractor: def __init__(self, env : "JoystickEnvImpl"): self.env = env @cached_property def observation_spec(self) -> gym.spaces.Dict: ret_dict = { "robot/joints_pos": gym.spaces.Box( low=self.env.robot.joint_qpos_mins, high=self.env.robot.joint_qpos_maxs, shape=(self.env.robot.joint_nums,), dtype=np.float32 ), "robot/joints_vel": gym.spaces.Box( low=-np.inf, high=np.inf, shape=(self.env.robot.joint_nums,), dtype=np.float32 ), "robot/imu": gym.spaces.Box( low=-np.inf, high=np.inf, shape=(4,), ), "robot/sensors_gyro": gym.spaces.Box( low=-np.inf, high=np.inf, shape=(3,), dtype=np.float32 ), "robot/sensors_framequat": gym.spaces.Box( low=-1.0, high=1.0, shape=(4,), dtype=np.float32 ), "robot/torques": gym.spaces.Box( low=-np.inf, high=np.inf, shape=(self.env.robot.joint_nums,), dtype=np.float32 ), "robot/sensors_local_velocimeter": gym.spaces.Box( low=-np.inf, high=np.inf, shape=(3,), dtype=np.float32 ), "robot/sensors_local_velocimeter_x": gym.spaces.Box( low=-np.inf, high=np.inf, shape=(1,), dtype=np.float32 ), "robot/sensors_accelerometer": gym.spaces.Box( low=-np.inf, high=np.inf, shape=(3,), dtype=np.float32 ), } if isinstance(self.env.robot.unwrapped(), BaseWalkerWithFootContact): ret_dict["robot/foot_forces"] = gym.spaces.Box( low=-np.inf, high=np.inf, shape=(4,), dtype=np.float32 ) ret_dict["robot/foot_forces_normalized"] = gym.spaces.Box( low=-np.inf, high=np.inf, shape=(4,), dtype=np.float32 ) ret_dict["robot/foot_forces_normalized_masked"] = gym.spaces.Box( low=-np.inf, high=np.inf, shape=(4,), dtype=np.float32 ) ret_dict["robot/foot_contacts"] = gym.spaces.Box( # should use MultiBinary but flatten() does not support having multibinary / box spaces in a Dict low=0, high=1, shape=(4,), dtype=np.float32 ) return gym.spaces.Dict(ret_dict) def extract_observation(self) -> dict[str,Any]: roll, pitch, yaw = self.env.robot.get_roll_pitch_yaw() dr, dp, dy = self.env.robot.get_3d_angular_velocity() imu = np.array([roll, pitch, dr, dp], dtype=np.float32) ret_dict = { "robot/joints_pos": self.env.robot.get_joint_qpos(), "robot/joints_vel": self.env.robot.get_joint_qvel(), "robot/imu": imu, "robot/sensors_gyro": self.env.robot.get_3d_angular_velocity(), "robot/sensors_framequat": self.env.robot.get_framequat_wijk(), "robot/torques": self.env.robot.get_joint_torques(), "robot/sensors_local_velocimeter": self.env.robot.get_3d_local_velocity(), "robot/sensors_local_velocimeter_x": self.env.robot.get_3d_local_velocity()[0:1], "robot/sensors_accelerometer": self.env.robot.get_3d_acceleration_local(), } if isinstance(self.env.robot.unwrapped(), BaseWalkerWithFootContact): ret_dict["robot/foot_forces"] = self.env.robot.get_foot_force() ret_dict["robot/foot_contacts"] = self.env.robot.get_foot_contact() if hasattr(self.env.robot, "foot_contact_no_contact_threshold") and hasattr(self.env.robot, "foot_contact_has_contact_threshold"): ret_dict["robot/foot_forces_normalized"] = (ret_dict["robot/foot_forces"] - self.env.robot.foot_contact_no_contact_threshold) / (self.env.robot.foot_contact_has_contact_threshold - self.env.robot.foot_contact_no_contact_threshold) else: ret_dict["robot/foot_forces_normalized"] = ret_dict["robot/foot_forces"] masked_foot_forces = ret_dict["robot/foot_forces_normalized"].copy() masked_foot_forces[-1] = 0.0 ret_dict["robot/foot_forces_normalized_masked"] = masked_foot_forces return ret_dict class JoystickEnvImpl(gym.Env[dict[str,Any],np.ndarray], WalkerEnvironment, JoystickEnvironment): metadata = { "render_modes": [] } def __init__( self, 
joystick_policy : JoystickPolicy ): gym.Env.__init__(self) WalkerEnvironment.__init__(self) JoystickEnvironment.__init__(self) # ====================== Store Parameters ====================== self._joystick_policy = joystick_policy self.obs_extractor = JoystickEnvObservationExtractor(self) self.random_state = np.random.RandomState() @property def action_space(self) -> gym.spaces.Box: return gym.spaces.Box( low=self.robot.action_qpos_mins, high=self.robot.action_qpos_maxs, dtype=np.float32 ) @property def observation_space(self) -> gym.spaces.Dict: robot_space = self.obs_extractor.observation_spec real_obs_space = {} for key, space in robot_space.items(): if key.startswith("robot/") and key[len("robot/"):] in self.joystick_policy.enabled_observables: real_obs_space[key] = space if self.joystick_policy.target_observable is not None: real_obs_space["target_obs"] = self.joystick_policy.target_observable.get_observation_spec() if not self.joystick_policy.target_yaw_provider.is_target_velocity_fixed(): real_obs_space["target_vel"] = gym.spaces.Box( low = 0.0, high = np.inf, shape=(1,) ) target_custom_data_spec = self.joystick_policy.target_yaw_provider.get_target_custom_data_observable_spec() if self.joystick_policy.enable_target_custom_obs and target_custom_data_spec is not None: real_obs_space["target_custom"] = target_custom_data_spec # Enforce order real_obs_space = OrderedDict(sorted(real_obs_space.items(), key=lambda t: t[0])) obs_space = gym.spaces.Dict(real_obs_space) return obs_space @property def joystick_policy(self) -> JoystickPolicy: return self._joystick_policy def set_joystick_policy(self, joystick_policy: JoystickPolicy): self._joystick_policy = joystick_policy @property def is_resetter_policy(self) -> bool: return False @property
def robot(self) -> BaseWalker:
2
2023-11-02 23:21:38+00:00
12k
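The record above centers on JoystickEnvImpl.observation_space, which filters the full robot observation spec down to the observables the joystick policy enables and fixes the key order with an OrderedDict before wrapping everything in gym.spaces.Dict ("# Enforce order" in the code). Below is a minimal, self-contained sketch of that filter-and-sort pattern; the names full_spec and enabled are illustrative and not taken from the record.

import gym
import numpy as np
from collections import OrderedDict

# Full spec: everything the robot could report (illustrative subset).
full_spec = {
    "robot/joints_pos": gym.spaces.Box(low=-np.inf, high=np.inf, shape=(12,), dtype=np.float32),
    "robot/imu": gym.spaces.Box(low=-np.inf, high=np.inf, shape=(4,), dtype=np.float32),
    "robot/foot_contacts": gym.spaces.Box(low=0, high=1, shape=(4,), dtype=np.float32),
}

# Keep only the observables the policy enables, matching the key-prefix check in the record.
enabled = {"joints_pos", "imu"}
filtered = {k: v for k, v in full_spec.items()
            if k.startswith("robot/") and k[len("robot/"):] in enabled}

# Sort keys before building the Dict so the flattened observation layout stays stable.
obs_space = gym.spaces.Dict(OrderedDict(sorted(filtered.items(), key=lambda t: t[0])))
print(obs_space)

Enforcing a sorted key order is presumably what keeps the flattened observation vector consistent across runs, which is why the record applies the same OrderedDict(sorted(...)) step before constructing the space.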
UMass-Foundation-Model/genome
engine/viper/base_models/xvlm/xvlm.py
[ { "identifier": "VisionTransformer", "path": "engine/viper/base_models/xvlm/vit.py", "snippet": "class VisionTransformer(nn.Module):\n \"\"\" Vision Transformer\n A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -\n https://arxiv.org/abs/2010.11929\n \"\"\"\n def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,\n num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None,\n drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None, local_attn_depth=0):\n \"\"\"\n Args:\n img_size (int, tuple): input image size\n patch_size (int, tuple): patch size\n in_chans (int): number of input channels\n num_classes (int): number of classes for classification head\n embed_dim (int): embedding dimension\n depth (int): depth of transformer\n num_heads (int): number of attention heads\n mlp_ratio (int): ratio of mlp hidden dim to embedding dim\n qkv_bias (bool): enable bias for qkv if True\n qk_scale (float): override default qk scale of head_dim ** -0.5 if set\n representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set\n drop_rate (float): dropout rate\n attn_drop_rate (float): attention dropout rate\n drop_path_rate (float): stochastic depth rate\n norm_layer: (nn.Module): normalization layer\n \"\"\"\n super().__init__()\n self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models\n norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)\n\n self.patch_embed = PatchEmbed(\n img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)\n\n self.num_patch_embed = self.patch_embed.num_patches\n\n self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n\n self.num_pos_embed = self.num_patch_embed + 1\n self.pos_embed = nn.Parameter(torch.zeros(1, self.num_pos_embed, embed_dim))\n\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule\n self.blocks = nn.ModuleList([\n Block(\n dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)\n for i in range(depth)])\n\n self.depth = depth\n self.local_attn_depth = local_attn_depth # do local attn from index=(depth - local_attn_depth)\n\n self.norm = norm_layer(embed_dim)\n\n trunc_normal_(self.pos_embed, std=.02)\n trunc_normal_(self.cls_token, std=.02)\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'pos_embed', 'cls_token'}\n\n def forward(self, x, register_blk=-1, idx_to_group_img=None, image_atts=None):\n\n B = x.shape[0]\n x = self.patch_embed(x)\n\n cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks\n x = torch.cat((cls_tokens, x), dim=1)\n \n x = x + self.pos_embed[:,:x.size(1),:]\n x = self.pos_drop(x)\n\n do_gather = True if idx_to_group_img is not None else False\n\n if do_gather and (image_atts is not None):\n full_atts = torch.ones(x.shape[:2], dtype=x.dtype).to(x.device)\n image_atts_blk = torch.cat([image_atts, full_atts], dim=0)\n\n 
image_atts_blk = image_atts_blk.unsqueeze(1).unsqueeze(2)\n image_atts_blk = (1.0 - image_atts_blk) * -10000.0\n else:\n image_atts_blk = None\n\n for i, blk in enumerate(self.blocks):\n if (self.local_attn_depth > 0) and (i >= self.depth-self.local_attn_depth):\n if do_gather:\n do_gather = False\n\n x_bs = torch.gather(x, dim=0, index=idx_to_group_img.view(-1, 1, 1).expand(-1, x.shape[1], x.shape[2]))\n x = torch.cat([x_bs, x], dim=0)\n\n x = blk(x, register_blk == i, image_atts=image_atts_blk)\n\n else:\n x = blk(x, register_blk==i, image_atts=None)\n\n x = self.norm(x)\n\n if idx_to_group_img is not None:\n bs = len(idx_to_group_img)\n x_bs, x_fullatts = torch.split(x, [bs, x.size(0)-bs])\n return x_bs, x_fullatts\n\n return x" }, { "identifier": "interpolate_pos_embed", "path": "engine/viper/base_models/xvlm/vit.py", "snippet": "def interpolate_pos_embed(pos_embed_checkpoint, num_patches, num_extra_tokens=1):\n # num_patches = visual_encoder.num_patch_embed\n # num_extra_tokens = visual_encoder.num_pos_embed - visual_encoder.num_patch_embed\n\n # interpolate position embedding\n embedding_size = pos_embed_checkpoint.shape[-1]\n # height (== width) for the checkpoint position embedding\n orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)\n # height (== width) for the new position embedding\n new_size = int(num_patches ** 0.5)\n\n if orig_size != new_size:\n # class_token and dist_token are kept unchanged\n extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]\n # only the position tokens are interpolated\n pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]\n pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)\n pos_tokens = torch.nn.functional.interpolate(\n pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)\n pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)\n new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)\n # print('reshape position embedding from %d to %d' % (orig_size ** 2, new_size ** 2))\n\n return new_pos_embed\n else:\n return pos_embed_checkpoint" }, { "identifier": "SwinTransformer", "path": "engine/viper/base_models/xvlm/swin_transformer.py", "snippet": "class SwinTransformer(nn.Module):\n r\"\"\" Swin Transformer\n A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -\n https://arxiv.org/pdf/2103.14030\n\n Args:\n img_size (int | tuple(int)): Input image size. Default 224\n patch_size (int | tuple(int)): Patch size. Default: 4\n in_chans (int): Number of input image channels. Default: 3\n num_classes (int): Number of classes for classification head. Default: 1000\n embed_dim (int): Patch embedding dimension. Default: 96\n depths (tuple(int)): Depth of each Swin Transformer layer.\n num_heads (tuple(int)): Number of attention heads in different layers.\n window_size (int): Window size. Default: 7\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4\n qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None\n drop_rate (float): Dropout rate. Default: 0\n attn_drop_rate (float): Attention dropout rate. Default: 0\n drop_path_rate (float): Stochastic depth rate. Default: 0.1\n norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.\n ape (bool): If True, add absolute position embedding to the patch embedding. 
Default: False\n patch_norm (bool): If True, add normalization after patch embedding. Default: True\n use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False\n \"\"\"\n\n def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,\n embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],\n window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,\n drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,\n norm_layer=nn.LayerNorm, ape=False, patch_norm=True,\n use_checkpoint=False, **kwargs):\n super().__init__()\n\n self.num_classes = num_classes\n self.num_layers = len(depths)\n self.embed_dim = embed_dim\n self.ape = ape\n self.patch_norm = patch_norm\n self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))\n self.mlp_ratio = mlp_ratio\n\n # split image into non-overlapping patches\n self.patch_embed = PatchEmbed(\n img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,\n norm_layer=norm_layer if self.patch_norm else None)\n num_patches = self.patch_embed.num_patches\n patches_resolution = self.patch_embed.patches_resolution\n self.patches_resolution = patches_resolution\n\n # absolute position embedding\n if self.ape:\n self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))\n trunc_normal_(self.absolute_pos_embed, std=.02)\n\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n # stochastic depth\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule\n\n # build layers\n self.layers = nn.ModuleList()\n for i_layer in range(self.num_layers):\n layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),\n input_resolution=(patches_resolution[0] // (2 ** i_layer),\n patches_resolution[1] // (2 ** i_layer)),\n depth=depths[i_layer],\n num_heads=num_heads[i_layer],\n window_size=window_size,\n mlp_ratio=self.mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate,\n drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],\n norm_layer=norm_layer,\n downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,\n use_checkpoint=use_checkpoint)\n self.layers.append(layer)\n\n self.norm = norm_layer(self.num_features)\n self.avgpool = nn.AdaptiveAvgPool1d(1)\n # self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()\n\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'absolute_pos_embed'}\n\n @torch.jit.ignore\n def no_weight_decay_keywords(self):\n return {'relative_position_bias_table'}\n\n def forward(self, x, idx_to_group_img=None, image_atts=None, **kwargs):\n x = self.patch_embed(x)\n if self.ape:\n x = x + self.absolute_pos_embed\n x = self.pos_drop(x)\n\n for layer in self.layers:\n x = layer(x)\n\n x = self.norm(x) # B L C\n\n x_cls = self.avgpool(x.transpose(1, 2)) # B C 1\n\n if idx_to_group_img is None:\n return torch.cat([x_cls.transpose(1, 2), x], dim=1)\n else:\n x_bs = torch.gather(x, dim=0, index=idx_to_group_img.view(-1, 1, 1).expand(-1, x.shape[1], x.shape[2]))\n weights = image_atts[:, 1:].unsqueeze(2) # B L 1\n x_bs_cls = torch.sum((weights * x_bs).transpose(1, 2), dim=-1, keepdim=True) # B C 1\n x_bs_cls = x_bs_cls / 
torch.sum(weights.transpose(1, 2), dim=-1, keepdim=True) # avgpool\n\n return torch.cat([x_bs_cls.transpose(1, 2), x_bs], dim=1), \\\n torch.cat([x_cls.transpose(1, 2), x], dim=1)\n\n def flops(self):\n flops = 0\n flops += self.patch_embed.flops()\n for i, layer in enumerate(self.layers):\n flops += layer.flops()\n flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)\n flops += self.num_features * self.num_classes\n return flops" }, { "identifier": "interpolate_relative_pos_embed", "path": "engine/viper/base_models/xvlm/swin_transformer.py", "snippet": "def interpolate_relative_pos_embed(rel_pos_bias, dst_num_pos, param_name=''):\n # from: https://github.com/microsoft/unilm/blob/8a0a1c1f4e7326938ea7580a00d56d7f17d65612/beit/run_class_finetuning.py#L348\n\n # rel_pos_bias: relative_position_bias_table\n src_num_pos, num_attn_heads = rel_pos_bias.size()\n\n num_extra_tokens = 0\n src_size = int((src_num_pos - num_extra_tokens) ** 0.5)\n dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5)\n if src_size != dst_size:\n print(\"Position interpolate %s from %dx%d to %dx%d\" % (param_name, src_size, src_size, dst_size, dst_size))\n\n # extra_tokens = rel_pos_bias[-num_extra_tokens:, :]\n # rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]\n\n def geometric_progression(a, r, n):\n return a * (1.0 - r ** n) / (1.0 - r)\n\n left, right = 1.01, 1.5\n while right - left > 1e-6:\n q = (left + right) / 2.0\n gp = geometric_progression(1, q, src_size // 2)\n if gp > dst_size // 2:\n right = q\n else:\n left = q\n\n # if q > 1.090307:\n # q = 1.090307\n\n dis = []\n cur = 1\n for i in range(src_size // 2):\n dis.append(cur)\n cur += q ** (i + 1)\n\n r_ids = [-_ for _ in reversed(dis)]\n\n x = r_ids + [0] + dis\n y = r_ids + [0] + dis\n\n t = dst_size // 2.0\n dx = np.arange(-t, t + 0.1, 1.0)\n dy = np.arange(-t, t + 0.1, 1.0)\n\n # print(\"Original positions = %s\" % str(x))\n # print(\"Target positions = %s\" % str(dx))\n\n all_rel_pos_bias = []\n\n for i in range(num_attn_heads):\n z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy()\n f = interpolate.interp2d(x, y, z, kind='cubic')\n all_rel_pos_bias.append(\n torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device))\n\n rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)\n\n return rel_pos_bias" }, { "identifier": "BertConfig", "path": "engine/viper/base_models/xvlm/xbert.py", "snippet": "_CONFIG_FOR_DOC = \"BertConfig\"\n_TOKENIZER_FOR_DOC = \"BertTokenizer\"\nBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"bert-base-uncased\",\n \"bert-large-uncased\",\n \"bert-base-cased\",\n \"bert-large-cased\",\n \"bert-base-multilingual-uncased\",\n \"bert-base-multilingual-cased\",\n \"bert-base-chinese\",\n \"bert-base-german-cased\",\n \"bert-large-uncased-whole-word-masking\",\n \"bert-large-cased-whole-word-masking\",\n \"bert-large-uncased-whole-word-masking-finetuned-squad\",\n \"bert-large-cased-whole-word-masking-finetuned-squad\",\n \"bert-base-cased-finetuned-mrpc\",\n \"bert-base-german-dbmdz-cased\",\n \"bert-base-german-dbmdz-uncased\",\n \"cl-tohoku/bert-base-japanese\",\n \"cl-tohoku/bert-base-japanese-whole-word-masking\",\n \"cl-tohoku/bert-base-japanese-char\",\n \"cl-tohoku/bert-base-japanese-char-whole-word-masking\",\n \"TurkuNLP/bert-base-finnish-cased-v1\",\n \"TurkuNLP/bert-base-finnish-uncased-v1\",\n \"wietsedv/bert-base-dutch-cased\",\n # See all BERT models at https://huggingface.co/models?filter=bert\n]\nBERT_START_DOCSTRING = r\"\"\"\n This model 
inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n Parameters:\n config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\nBERT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n Indices can be obtained using :class:`~transformers.BertTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. 
See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\ndef load_tf_weights_in_bert(model, config, tf_checkpoint_path):\n def __init__(self, config):\n def forward(\n self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0\n ):\n def __init__(self, config, is_cross_attention):\n def save_attn_gradients(self, attn_gradients):\n def get_attn_gradients(self):\n def save_attention_map(self, attention_map):\n def get_attention_map(self):\n def transpose_for_scores(self, x):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n def __init__(self, config):\n def forward(self, hidden_states, input_tensor):\n def __init__(self, config, is_cross_attention=False):\n def prune_heads(self, heads):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n def __init__(self, config):\n def forward(self, hidden_states):\n def __init__(self, config):\n def forward(self, hidden_states, input_tensor):\n def __init__(self, config, layer_num):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n def feed_forward_chunk(self, attention_output):\n def __init__(self, config):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n mode='multi_modal',\n ):\n def create_custom_forward(module):\n def custom_forward(*inputs):\n def __init__(self, config):\n def forward(self, hidden_states):\n def __init__(self, config):\n def forward(self, hidden_states):\n def __init__(self, config):\n def forward(self, hidden_states):\n def __init__(self, config):\n def forward(self, sequence_output):\n def __init__(self, config):\n def forward(self, pooled_output):\n def __init__(self, config):\n def forward(self, sequence_output, pooled_output):\n def _init_weights(self, module):\n def __init__(self, config, add_pooling_layer=True):\n def get_input_embeddings(self):\n def set_input_embeddings(self, value):\n def _prune_heads(self, heads_to_prune):\n def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor:\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n is_decoder=False,\n mode='multi_modal',\n ):\n def __init__(self, config):\n def get_output_embeddings(self):\n def set_output_embeddings(self, new_embeddings):\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n next_sentence_label=None,\n output_attentions=None,\n 
output_hidden_states=None,\n return_dict=None,\n ):\n def __init__(self, lb_smooth=0.1, reduction='mean', ignore_index=-100):\n def forward(self, logits, label):\n def __init__(self, config, label_smoothing=0.0):\n def get_output_embeddings(self):\n def set_output_embeddings(self, new_embeddings):\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n is_decoder=True,\n reduction='mean',\n mode='multi_modal',\n return_logits=False,\n ):\n def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):\n def _reorder_cache(self, past, beam_idx):\n def _generate_no_beam_search(\n self,\n input_ids,\n cur_len,\n max_length,\n do_sample,\n temperature,\n top_k,\n top_p,\n repetition_penalty,\n pad_token_id,\n eos_token_ids,\n batch_size,\n **model_kwargs\n ):\ndef top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float(\"Inf\"), min_tokens_to_keep=1):\n def __init__(self, config):\n def get_output_embeddings(self):\n def set_output_embeddings(self, new_embeddings):\n def gather_seq_out_by_pos(self, seq, pos):\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n is_decoder=False,\n mode='multi_modal',\n return_logits=False,\n masked_pos=None,\n ):\n def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):\n def __init__(self, config):\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs\n ):\n def __init__(self, config):\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n def __init__(self, config):\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n def __init__(self, config):\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n def __init__(self, config):\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\nclass BertEmbeddings(nn.Module):\nclass BertSelfAttention(nn.Module):\nclass BertSelfOutput(nn.Module):\nclass BertAttention(nn.Module):\nclass BertIntermediate(nn.Module):\nclass BertOutput(nn.Module):\nclass BertLayer(nn.Module):\nclass BertEncoder(nn.Module):\nclass 
BertPooler(nn.Module):\nclass BertPredictionHeadTransform(nn.Module):\nclass BertLMPredictionHead(nn.Module):\nclass BertOnlyMLMHead(nn.Module):\nclass BertOnlyNSPHead(nn.Module):\nclass BertPreTrainingHeads(nn.Module):\nclass BertPreTrainedModel(PreTrainedModel):\nclass BertForPreTrainingOutput(ModelOutput):\nclass BertModel(BertPreTrainedModel):\nclass BertForPreTraining(BertPreTrainedModel):\nclass LabelSmoothSoftmaxCEV1(nn.Module):\nclass BertLMHeadModel(BertPreTrainedModel):\nclass BertForMaskedLM(BertPreTrainedModel):\nclass BertForNextSentencePrediction(BertPreTrainedModel):\nclass BertForSequenceClassification(BertPreTrainedModel):\nclass BertForMultipleChoice(BertPreTrainedModel):\nclass BertForTokenClassification(BertPreTrainedModel):\nclass BertForQuestionAnswering(BertPreTrainedModel):" } ]
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
import json
from functools import partial
from engine.viper.base_models.xvlm.vit import VisionTransformer, interpolate_pos_embed
from engine.viper.base_models.xvlm.swin_transformer import SwinTransformer, interpolate_relative_pos_embed
from engine.viper.base_models.xvlm.xbert import BertConfig, BertForMaskedLM, BertModel
8,016
# Multi-Grained Vision Language Pre-Training: Aligning Texts with Visual Concepts (https://arxiv.org/abs/2111.08276) # Github: https://github.com/zengyan-97/X-VLM # Copyright (c) 2022, ByteDance Inc. # All rights reserved. def read_json(rpath): with open(rpath, 'r') as f: return json.load(f) class AllGather(torch.autograd.Function): """An autograd function that performs allgather on a tensor.""" @staticmethod def forward(ctx, tensor, rank, world_size): output = [torch.empty_like(tensor) for _ in range(world_size)] dist.all_gather(output, tensor) ctx.rank = rank ctx.batch_size = tensor.shape[0] return torch.cat(output, 0) @staticmethod def backward(ctx, grad_output): return ( grad_output[ctx.batch_size * ctx.rank: ctx.batch_size * (ctx.rank + 1)], None, None ) allgather = AllGather.apply def build_vision_encoder(vision_config, load_params=False): """ Args: load_params: False when building fine-tuning models """ vision_width = vision_config['vision_width']
# Multi-Grained Vision Language Pre-Training: Aligning Texts with Visual Concepts (https://arxiv.org/abs/2111.08276) # Github: https://github.com/zengyan-97/X-VLM # Copyright (c) 2022, ByteDance Inc. # All rights reserved. def read_json(rpath): with open(rpath, 'r') as f: return json.load(f) class AllGather(torch.autograd.Function): """An autograd function that performs allgather on a tensor.""" @staticmethod def forward(ctx, tensor, rank, world_size): output = [torch.empty_like(tensor) for _ in range(world_size)] dist.all_gather(output, tensor) ctx.rank = rank ctx.batch_size = tensor.shape[0] return torch.cat(output, 0) @staticmethod def backward(ctx, grad_output): return ( grad_output[ctx.batch_size * ctx.rank: ctx.batch_size * (ctx.rank + 1)], None, None ) allgather = AllGather.apply def build_vision_encoder(vision_config, load_params=False): """ Args: load_params: False when building fine-tuning models """ vision_width = vision_config['vision_width']
vision_encoder = SwinTransformer(img_size=vision_config['image_res'],
2
2023-11-01 16:39:33+00:00
12k
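The context for the record above includes interpolate_pos_embed from vit.py, which resizes a ViT position-embedding checkpoint to a new patch grid with bicubic interpolation while leaving the extra class token unchanged. The following is a small stand-alone sketch of the same idea; the helper name resize_pos_embed and the example shapes are invented for illustration, not part of X-VLM.

import torch
import torch.nn.functional as F

def resize_pos_embed(pos_embed, new_num_patches, num_extra_tokens=1):
    # pos_embed: [1, num_extra_tokens + old_grid**2, dim]
    dim = pos_embed.shape[-1]
    old_grid = int((pos_embed.shape[-2] - num_extra_tokens) ** 0.5)
    new_grid = int(new_num_patches ** 0.5)
    if old_grid == new_grid:
        return pos_embed
    extra = pos_embed[:, :num_extra_tokens]       # class/dist tokens are kept unchanged
    grid = pos_embed[:, num_extra_tokens:]        # only the spatial tokens are interpolated
    grid = grid.reshape(1, old_grid, old_grid, dim).permute(0, 3, 1, 2)
    grid = F.interpolate(grid, size=(new_grid, new_grid), mode="bicubic", align_corners=False)
    grid = grid.permute(0, 2, 3, 1).flatten(1, 2)
    return torch.cat([extra, grid], dim=1)

# e.g. resize a 14x14 grid (224px / patch 16) to 24x24 (384px / patch 16)
pe = torch.zeros(1, 1 + 14 * 14, 768)
print(resize_pos_embed(pe, 24 * 24).shape)        # torch.Size([1, 577, 768])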
ml4bio/RhoFold
rhofold/model/structure_module.py
[ { "identifier": "Linear", "path": "rhofold/model/primitives.py", "snippet": "class Linear(nn.Linear):\n \"\"\"\n A Linear layer with built-in nonstandard initializations. Called just\n like torch.nn.Linear.\n\n Implements the initializers in 1.11.4, plus some additional ones found\n in the code.\n \"\"\"\n\n def __init__(\n self,\n in_dim: int,\n out_dim: int,\n bias: bool = True,\n ):\n \"\"\"\n Args:\n in_dim:\n The final dimension of inputs to the layer\n out_dim:\n The final dimension of layer outputs\n bias:\n Whether to learn an additive bias. True by default\n \"\"\"\n super(Linear, self).__init__(in_dim, out_dim, bias=bias)\n\n if bias:\n with torch.no_grad():\n self.bias.fill_(0)" }, { "identifier": "LayerNorm", "path": "rhofold/model/primitives.py", "snippet": "class LayerNorm(nn.Module):\n def __init__(self, c_in, eps=1e-5):\n super(LayerNorm, self).__init__()\n \n self.c_in = (c_in,)\n self.eps = eps\n\n self.weight = nn.Parameter(torch.ones(c_in))\n self.bias = nn.Parameter(torch.zeros(c_in))\n\n def forward(self, x): \n\n out = nn.functional.layer_norm(\n x,\n self.c_in,\n self.weight,\n self.bias,\n self.eps,\n )\n\n return out" }, { "identifier": "Rigid", "path": "rhofold/utils/rigid_utils.py", "snippet": "class Rigid:\n \"\"\"\n A class representing a rigid transformation. Little more than a wrapper\n around two objects: a Rotation object and a [*, 3] translation\n Designed to behave approximately like a single torch tensor with the \n shape of the shared batch dimensions of its component parts.\n \"\"\"\n def __init__(self, \n rots: Optional[Rotation],\n trans: Optional[torch.Tensor],\n ):\n \"\"\"\n Args:\n rots: A [*, 3, 3] rotation tensor\n trans: A corresponding [*, 3] translation tensor\n \"\"\"\n # (we need device, dtype, etc. from at least one input)\n\n batch_dims, dtype, device, requires_grad = None, None, None, None\n if(trans is not None):\n batch_dims = trans.shape[:-1]\n dtype = trans.dtype\n device = trans.device\n requires_grad = trans.requires_grad\n elif(rots is not None):\n batch_dims = rots.shape\n dtype = rots.dtype\n device = rots.device\n requires_grad = rots.requires_grad\n else:\n raise ValueError(\"At least one input argument must be specified\")\n\n if(rots is None):\n rots = Rotation.identity(\n batch_dims, dtype, device, requires_grad,\n )\n elif(trans is None):\n trans = identity_trans(\n batch_dims, dtype, device, requires_grad,\n )\n\n if((rots.shape != trans.shape[:-1]) or\n (rots.device != trans.device)):\n raise ValueError(\"Rots and trans incompatible\")\n\n # Force full precision. 
Happens to the rotations automatically.\n trans = trans.to(dtype=torch.float32)\n\n self._rots = rots\n self._trans = trans\n\n @staticmethod\n def identity(\n shape: Tuple[int], \n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None, \n requires_grad: bool = True,\n fmt: str = \"quat\",\n ) -> Rigid:\n \"\"\"\n Constructs an identity transformation.\n\n Args:\n shape: \n The desired shape\n dtype: \n The dtype of both internal tensors\n device: \n The device of both internal tensors\n requires_grad: \n Whether grad should be enabled for the internal tensors\n Returns:\n The identity transformation\n \"\"\"\n return Rigid(\n Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt),\n identity_trans(shape, dtype, device, requires_grad),\n )\n\n def __getitem__(self, \n index: Any,\n ) -> Rigid:\n \"\"\" \n Indexes the affine transformation with PyTorch-style indices.\n The index is applied to the shared dimensions of both the rotation\n and the translation.\n\n E.g.::\n\n r = Rotation(rot_mats=torch.rand(10, 10, 3, 3), quats=None)\n t = Rigid(r, torch.rand(10, 10, 3))\n indexed = t[3, 4:6]\n assert(indexed.shape == (2,))\n assert(indexed.get_rots().shape == (2,))\n assert(indexed.get_trans().shape == (2, 3))\n\n Args:\n index: A standard torch tensor index. E.g. 8, (10, None, 3),\n or (3, slice(0, 1, None))\n Returns:\n The indexed tensor \n \"\"\"\n if type(index) != tuple:\n index = (index,)\n \n return Rigid(\n self._rots[index],\n self._trans[index + (slice(None),)],\n )\n\n def __mul__(self,\n right: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Pointwise left multiplication of the transformation with a tensor.\n Can be used to e.g. mask the Rigid.\n\n Args:\n right:\n The tensor multiplicand\n Returns:\n The product\n \"\"\"\n if not(isinstance(right, torch.Tensor)):\n raise TypeError(\"The other multiplicand must be a Tensor\")\n\n new_rots = self._rots * right\n new_trans = self._trans * right[..., None]\n\n return Rigid(new_rots, new_trans)\n\n def __rmul__(self,\n left: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Reverse pointwise multiplication of the transformation with a \n tensor.\n\n Args:\n left:\n The left multiplicand\n Returns:\n The product\n \"\"\"\n return self.__mul__(left)\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"\n Returns the shape of the shared dimensions of the rotation and\n the translation.\n \n Returns:\n The shape of the transformation\n \"\"\"\n s = self._trans.shape[:-1]\n return s\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n Returns the device on which the Rigid's tensors are located.\n\n Returns:\n The device on which the Rigid's tensors are located\n \"\"\"\n return self._trans.device\n\n def get_rots(self) -> Rotation:\n \"\"\"\n Getter for the rotation.\n\n Returns:\n The rotation object\n \"\"\"\n return self._rots\n\n def get_trans(self) -> torch.Tensor:\n \"\"\"\n Getter for the translation.\n\n Returns:\n The stored translation\n \"\"\"\n return self._trans\n\n def compose_q_update_vec(self, \n q_update_vec: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Composes the transformation with a quaternion update vector of\n shape [*, 6], where the final 6 columns represent the x, y, and\n z values of a quaternion of form (1, x, y, z) followed by a 3D\n translation.\n\n Args:\n q_vec: The quaternion update vector.\n Returns:\n The composed transformation.\n \"\"\"\n q_vec, t_vec = q_update_vec[..., :3], q_update_vec[..., 3:]\n new_rots = self._rots.compose_q_update_vec(q_vec)\n\n trans_update = 
self._rots.apply(t_vec)\n new_translation = self._trans + trans_update\n\n return Rigid(new_rots, new_translation)\n\n def compose(self,\n r: Rigid,\n ) -> Rigid:\n \"\"\"\n Composes the current rigid object with another.\n\n Args:\n r:\n Another Rigid object\n Returns:\n The composition of the two transformations\n \"\"\"\n new_rot = self._rots.compose_r(r._rots)\n new_trans = self._rots.apply(r._trans) + self._trans\n return Rigid(new_rot, new_trans)\n\n def apply(self, \n pts: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n Applies the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor.\n Returns:\n The transformed points.\n \"\"\"\n rotated = self._rots.apply(pts) \n return rotated + self._trans\n\n def invert_apply(self, \n pts: torch.Tensor\n ) -> torch.Tensor:\n \"\"\"\n Applies the inverse of the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor\n Returns:\n The transformed points.\n \"\"\"\n pts = pts - self._trans\n return self._rots.invert_apply(pts) \n\n def invert(self) -> Rigid:\n \"\"\"\n Inverts the transformation.\n\n Returns:\n The inverse transformation.\n \"\"\"\n rot_inv = self._rots.invert() \n trn_inv = rot_inv.apply(self._trans)\n\n return Rigid(rot_inv, -1 * trn_inv)\n\n def map_tensor_fn(self, \n fn: Callable[torch.Tensor, torch.Tensor]\n ) -> Rigid:\n \"\"\"\n Apply a Tensor -> Tensor function to underlying translation and\n rotation tensors, mapping over the translation/rotation dimensions\n respectively.\n\n Args:\n fn:\n A Tensor -> Tensor function to be mapped over the Rigid\n Returns:\n The transformed Rigid object\n \"\"\" \n new_rots = self._rots.map_tensor_fn(fn) \n new_trans = torch.stack(\n list(map(fn, torch.unbind(self._trans, dim=-1))), \n dim=-1\n )\n\n return Rigid(new_rots, new_trans)\n\n def to_tensor_4x4(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a homogenous transformation tensor.\n\n Returns:\n A [*, 4, 4] homogenous transformation tensor\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 4, 4))\n tensor[..., :3, :3] = self._rots.get_rot_mats()\n tensor[..., :3, 3] = self._trans\n tensor[..., 3, 3] = 1\n return tensor\n\n @staticmethod\n def from_tensor_4x4(\n t: torch.Tensor\n ) -> Rigid:\n \"\"\"\n Constructs a transformation from a homogenous transformation\n tensor.\n\n Args:\n t: [*, 4, 4] homogenous transformation tensor\n Returns:\n T object with shape [*]\n \"\"\"\n if(t.shape[-2:] != (4, 4)):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n rots = Rotation(rot_mats=t[..., :3, :3], quats=None)\n trans = t[..., :3, 3]\n \n return Rigid(rots, trans)\n\n def to_tensor_7(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a tensor with 7 final columns, four \n for the quaternion followed by three for the translation.\n\n Returns:\n A [*, 7] tensor representation of the transformation\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 7))\n tensor[..., :4] = self._rots.get_quats()\n tensor[..., 4:] = self._trans\n\n return tensor\n\n @staticmethod\n def from_tensor_7(\n t: torch.Tensor,\n normalize_quats: bool = False,\n ) -> Rigid:\n if(t.shape[-1] != 7):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n quats, trans = t[..., :4], t[..., 4:]\n\n rots = Rotation(\n rot_mats=None, \n quats=quats, \n normalize_quats=normalize_quats\n )\n\n return Rigid(rots, trans)\n\n @staticmethod\n def from_3_points(\n p_neg_x_axis: torch.Tensor, \n origin: torch.Tensor, \n p_xy_plane: torch.Tensor, \n eps: float = 1e-8\n ) -> 
Rigid:\n \"\"\"\n Implements algorithm 21. Constructs transformations from sets of 3 \n points using the Gram-Schmidt algorithm.\n\n Args:\n p_neg_x_axis: [*, 3] coordinates\n origin: [*, 3] coordinates used as frame origins\n p_xy_plane: [*, 3] coordinates\n eps: Small epsilon value\n Returns:\n A transformation object of shape [*]\n \"\"\"\n p_neg_x_axis = torch.unbind(p_neg_x_axis, dim=-1)\n origin = torch.unbind(origin, dim=-1)\n p_xy_plane = torch.unbind(p_xy_plane, dim=-1)\n\n e0 = [c1 - c2 for c1, c2 in zip(origin, p_neg_x_axis)]\n e1 = [c1 - c2 for c1, c2 in zip(p_xy_plane, origin)]\n\n denom = torch.sqrt(sum((c * c for c in e0)) + eps)\n e0 = [c / denom for c in e0]\n dot = sum((c1 * c2 for c1, c2 in zip(e0, e1)))\n e1 = [c2 - c1 * dot for c1, c2 in zip(e0, e1)]\n denom = torch.sqrt(sum((c * c for c in e1)) + eps)\n e1 = [c / denom for c in e1]\n e2 = [\n e0[1] * e1[2] - e0[2] * e1[1],\n e0[2] * e1[0] - e0[0] * e1[2],\n e0[0] * e1[1] - e0[1] * e1[0],\n ]\n\n rots = torch.stack([c for tup in zip(e0, e1, e2) for c in tup], dim=-1)\n rots = rots.reshape(rots.shape[:-1] + (3, 3))\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, torch.stack(origin, dim=-1))\n\n def unsqueeze(self, \n dim: int,\n ) -> Rigid:\n \"\"\"\n Analogous to torch.unsqueeze. The dimension is relative to the\n shared dimensions of the rotation/translation.\n \n Args:\n dim: A positive or negative dimension index.\n Returns:\n The unsqueezed transformation.\n \"\"\"\n if dim >= len(self.shape):\n raise ValueError(\"Invalid dimension\")\n rots = self._rots.unsqueeze(dim)\n trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1)\n\n return Rigid(rots, trans)\n\n @staticmethod\n def cat(\n ts: Sequence[Rigid], \n dim: int,\n ) -> Rigid:\n \"\"\"\n Concatenates transformations along a new dimension.\n\n Args:\n ts: \n A list of T objects\n dim: \n The dimension along which the transformations should be \n concatenated\n Returns:\n A concatenated transformation object\n \"\"\"\n rots = Rotation.cat([t._rots for t in ts], dim) \n trans = torch.cat(\n [t._trans for t in ts], dim=dim if dim >= 0 else dim - 1\n )\n\n return Rigid(rots, trans)\n\n def apply_rot_fn(self, fn: Callable[Rotation, Rotation]) -> Rigid:\n \"\"\"\n Applies a Rotation -> Rotation function to the stored rotation\n object.\n\n Args:\n fn: A function of type Rotation -> Rotation\n Returns:\n A transformation object with a transformed rotation.\n \"\"\"\n return Rigid(fn(self._rots), self._trans)\n\n def apply_trans_fn(self, fn: Callable[torch.Tensor, torch.Tensor]) -> Rigid:\n \"\"\"\n Applies a Tensor -> Tensor function to the stored translation.\n\n Args:\n fn: \n A function of type Tensor -> Tensor to be applied to the\n translation\n Returns:\n A transformation object with a transformed translation.\n \"\"\"\n return Rigid(self._rots, fn(self._trans))\n\n def scale_translation(self, trans_scale_factor: float) -> Rigid:\n \"\"\"\n Scales the translation by a constant factor.\n\n Args:\n trans_scale_factor:\n The constant factor\n Returns:\n A transformation object with a scaled translation.\n \"\"\"\n fn = lambda t: t * trans_scale_factor\n return self.apply_trans_fn(fn)\n\n def stop_rot_gradient(self) -> Rigid:\n \"\"\"\n Detaches the underlying rotation object\n\n Returns:\n A transformation object with detached rotations\n \"\"\"\n fn = lambda r: r.detach()\n return self.apply_rot_fn(fn)\n\n @staticmethod\n def make_transform_from_reference(n_xyz, ca_xyz, c_xyz, eps=1e-20):\n \"\"\"\n Returns a transformation object 
from reference coordinates.\n \n Note that this method does not take care of symmetries. If you \n provide the atom positions in the non-standard way, the N atom will \n end up not at [-0.527250, 1.359329, 0.0] but instead at \n [-0.527250, -1.359329, 0.0]. You need to take care of such cases in \n your code.\n \n Args:\n n_xyz: A [*, 3] tensor of nitrogen xyz coordinates.\n ca_xyz: A [*, 3] tensor of carbon alpha xyz coordinates.\n c_xyz: A [*, 3] tensor of carbon xyz coordinates.\n Returns:\n A transformation object. After applying the translation and \n rotation to the reference backbone, the coordinates will \n approximately equal to the input coordinates.\n \"\"\" \n translation = -1 * ca_xyz\n n_xyz = n_xyz + translation\n c_xyz = c_xyz + translation\n\n c_x, c_y, c_z = [c_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2)\n sin_c1 = -c_y / norm\n cos_c1 = c_x / norm\n zeros = sin_c1.new_zeros(sin_c1.shape)\n ones = sin_c1.new_ones(sin_c1.shape)\n\n c1_rots = sin_c1.new_zeros((*sin_c1.shape, 3, 3))\n c1_rots[..., 0, 0] = cos_c1\n c1_rots[..., 0, 1] = -1 * sin_c1\n c1_rots[..., 1, 0] = sin_c1\n c1_rots[..., 1, 1] = cos_c1\n c1_rots[..., 2, 2] = 1\n\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2 + c_z ** 2)\n sin_c2 = c_z / norm\n cos_c2 = torch.sqrt(c_x ** 2 + c_y ** 2) / norm\n\n c2_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n c2_rots[..., 0, 0] = cos_c2\n c2_rots[..., 0, 2] = sin_c2\n c2_rots[..., 1, 1] = 1\n c2_rots[..., 2, 0] = -1 * sin_c2\n c2_rots[..., 2, 2] = cos_c2\n\n c_rots = rot_matmul(c2_rots, c1_rots)\n n_xyz = rot_vec_mul(c_rots, n_xyz)\n\n _, n_y, n_z = [n_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + n_y ** 2 + n_z ** 2)\n sin_n = -n_z / norm\n cos_n = n_y / norm\n\n n_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n n_rots[..., 0, 0] = 1\n n_rots[..., 1, 1] = cos_n\n n_rots[..., 1, 2] = -1 * sin_n\n n_rots[..., 2, 1] = sin_n\n n_rots[..., 2, 2] = cos_n\n\n rots = rot_matmul(n_rots, c_rots)\n\n rots = rots.transpose(-1, -2)\n translation = -1 * translation\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, translation)\n\n def cuda(self) -> Rigid:\n \"\"\"\n Moves the transformation object to GPU memory\n \n Returns:\n A version of the transformation on GPU\n \"\"\"\n return Rigid(self._rots.cuda(), self._trans.cuda())" }, { "identifier": "dict_multimap", "path": "rhofold/utils/tensor_utils.py", "snippet": "def dict_multimap(fn, dicts):\n first = dicts[0]\n new_dict = {}\n for k, v in first.items():\n all_v = [d[k] for d in dicts]\n if type(v) is dict:\n new_dict[k] = dict_multimap(fn, all_v)\n else:\n new_dict[k] = fn(all_v)\n\n return new_dict" }, { "identifier": "permute_final_dims", "path": "rhofold/utils/tensor_utils.py", "snippet": "def permute_final_dims(tensor: torch.Tensor, inds: List[int]):\n zero_index = -1 * len(inds)\n first_inds = list(range(len(tensor.shape[:zero_index])))\n return tensor.permute(first_inds + [zero_index + i for i in inds])" }, { "identifier": "flatten_final_dims", "path": "rhofold/utils/tensor_utils.py", "snippet": "def flatten_final_dims(t: torch.Tensor, no_dims: int):\n return t.reshape(t.shape[:-no_dims] + (-1,))" }, { "identifier": "RNAAlphabet", "path": "rhofold/utils/alphabet.py", "snippet": "class RNAAlphabet(Alphabet):\n\n def get_batch_converter(self):\n if self.use_msa:\n return RNAMSABatchConverter(self)\n else:\n return BatchConverter(self)\n\n @classmethod\n def from_architecture(cls, name: str, ) -> \"RNAAlphabet\":\n if name in (\"RNA MSA Transformer\", 
\"rna_msa_transformer\", \"RNA\"):\n standard_toks = rna_msaseq_toks[\"toks\"]\n prepend_toks = (\"<cls>\", \"<pad>\", \"<eos>\", \"<unk>\")\n append_toks = (\"<mask>\",)\n prepend_bos = True\n append_eos = False\n use_msa = True\n else:\n raise ValueError(\"Unknown architecture selected\")\n return cls(\n standard_toks, prepend_toks, append_toks, prepend_bos, append_eos, use_msa\n )" }, { "identifier": "RNAConverter", "path": "rhofold/utils/converter.py", "snippet": "class RNAConverter():\n \"\"\"RNA Structure Converter.\"\"\"\n\n def __init__(self):\n \"\"\"\"\"\"\n\n self.eps = 1e-4\n self.__init()\n\n def __init(self):\n \"\"\"\"\"\"\n\n self.cord_dict = defaultdict(dict)\n for resd_name in RNA_CONSTANTS.RESD_NAMES:\n for atom_name, _, cord_vals in RNA_CONSTANTS.ATOM_INFOS_PER_RESD[resd_name]:\n self.cord_dict[resd_name][atom_name] = torch.tensor(cord_vals, dtype=torch.float32)\n\n trans_dict_all = {}\n for resd_name in RNA_CONSTANTS.RESD_NAMES:\n trans_dict = {}\n cord_dict = {}\n\n atom_infos = RNA_CONSTANTS.ATOM_INFOS_PER_RESD[resd_name]\n angl_infos = RNA_CONSTANTS.ANGL_INFOS_PER_RESD[resd_name]\n n_angls = len(angl_infos)\n \n for atom_name, idx_rgrp, _ in atom_infos:\n if idx_rgrp == 0:\n cord_dict[atom_name] = self.cord_dict[resd_name][atom_name]\n\n trans_dict['omega-main'] = (torch.eye(3, dtype=torch.float32), torch.zeros((3), dtype=torch.float32))\n trans_dict['phi-main'] = (torch.eye(3, dtype=torch.float32), torch.zeros((3), dtype=torch.float32))\n\n for idx_angl, (angl_name, _, atom_names_sel) in enumerate(angl_infos):\n x1 = cord_dict[atom_names_sel[0]]\n x2 = cord_dict[atom_names_sel[1]]\n x3 = cord_dict[atom_names_sel[2]]\n rot, tsl_vec = calc_rot_tsl(x1, x3, x3 + (x3 - x2))\n trans_dict['%s-main' % angl_name] = (rot, tsl_vec)\n\n for atom_name, idx_rgrp, _ in atom_infos:\n if idx_rgrp == idx_angl + 3:\n cord_dict[atom_name] = tsl_vec + torch.sum(\n rot * self.cord_dict[resd_name][atom_name].view(1, 3), dim=1)\n\n for idx_angl_src in range(1, n_angls - 1):\n idx_angl_dst = idx_angl_src + 1\n angl_name_src = angl_infos[idx_angl_src][0]\n angl_name_dst = angl_infos[idx_angl_dst][0]\n rot_src, tsl_vec_src = trans_dict['%s-main' % angl_name_src]\n rot_dst, tsl_vec_dst = trans_dict['%s-main' % angl_name_dst]\n rot = torch.matmul(rot_src.transpose(1, 0), rot_dst)\n tsl_vec = torch.matmul(rot_src.transpose(1, 0), tsl_vec_dst - tsl_vec_src)\n trans_dict['%s-%s' % (angl_name_dst, angl_name_src)] = (rot, tsl_vec)\n\n trans_dict_all[resd_name] = trans_dict\n\n self.trans_dict_init = trans_dict_all\n\n def build_cords(self, seq, fram, angl, rtn_cmsk=False):\n\n # initialization\n n_resds = len(seq)\n device = angl.device\n\n angl = angl.squeeze(dim=0) / (torch.norm(angl.squeeze(dim=0), dim=2, keepdim=True) + self.eps)\n rigid = Rigid.from_tensor_7(fram, normalize_quats=True)\n fram = rigid.to_tensor_4x4()\n rot = fram[:,:,:3,:3]\n tsl = fram[:,:,:3,3:].permute(0,1,3,2)\n\n fram = torch.cat([rot, tsl], dim=2)[:,:,:4,:3].permute(1,0,2,3)\n fmsk = torch.ones((n_resds, 1), dtype=torch.int8, device=device)\n amsk = torch.ones((n_resds, RNA_CONSTANTS.N_ANGLS_PER_RESD_MAX), dtype=torch.int8, device=device)\n cord = torch.zeros((n_resds, RNA_CONSTANTS.ATOM_NUM_MAX, 3), dtype=torch.float32, device=device)\n cmsk = torch.zeros((n_resds, RNA_CONSTANTS.ATOM_NUM_MAX), dtype=torch.int8, device=device)\n\n for resd_name in RNA_CONSTANTS.RESD_NAMES:\n idxs = [x for x in range(n_resds) if seq[x] == resd_name]\n if len(idxs) == 0:\n continue\n cord[idxs], cmsk[idxs] =\\\n self.__build_cord(resd_name, 
fram[idxs], fmsk[idxs], angl[idxs], amsk[idxs])\n\n return (cord, cmsk) if rtn_cmsk else (cord)\n\n def __build_cord(self, resd_name, fram, fmsk, angl, amsk):\n \"\"\"\"\"\"\n\n # initialization\n device = fram.device\n n_resds = fram.shape[0]\n atom_names_all = RNA_CONSTANTS.ATOM_NAMES_PER_RESD[resd_name]\n atom_names_pad = atom_names_all + ['X'] * (RNA_CONSTANTS.ATOM_NUM_MAX - len(atom_names_all))\n atom_infos_all = RNA_CONSTANTS.ATOM_INFOS_PER_RESD[resd_name]\n\n cord_dict = defaultdict(\n lambda: torch.zeros((n_resds, 3), dtype=torch.float32, device=device))\n cmsk_vec_dict = defaultdict(lambda: torch.zeros((n_resds), dtype=torch.int8, device=device))\n\n fram_null = torch.tensor(\n [[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]], dtype=torch.float32, device=device)\n fram_dict = defaultdict(lambda: fram_null.unsqueeze(dim=0).repeat(n_resds, 1, 1))\n fmsk_vec_dict = defaultdict(lambda: torch.zeros((n_resds), dtype=torch.int8, device=device))\n\n trans_dict = {'main': (fram[:, 0, :3], fram[:, 0, 3])}\n\n rot_curr, tsl_curr = trans_dict['main']\n atom_names_sel = [x[0] for x in atom_infos_all if x[1] == 0]\n for atom_name_sel in atom_names_sel:\n cord_vec = self.cord_dict[resd_name][atom_name_sel].to(device)\n cord_dict[atom_name_sel] = \\\n tsl_curr + torch.sum(rot_curr * cord_vec.view(1, 1, 3), dim=2)\n cmsk_vec_dict[atom_name_sel] = fmsk[:, 0]\n\n # determine 3D coordinates of atoms belonging to side-chain rigid-groups\n angl_infos_all = RNA_CONSTANTS.ANGL_INFOS_PER_RESD[resd_name]\n rgrp_names_all = ['omega', 'phi'] + [x[0] for x in angl_infos_all]\n\n for idx_rgrp, rgrp_name_curr in enumerate(rgrp_names_all):\n if rgrp_name_curr in ['omega', 'phi', 'angl_0', 'angl_1']:\n rgrp_name_prev = 'main'\n else:\n rgrp_name_prev = 'angl_%d' % (int(rgrp_name_curr[-1]) - 1)\n\n rot_prev, tsl_prev = trans_dict[rgrp_name_prev]\n rot_base, tsl_vec_base = \\\n self.trans_dict_init[resd_name]['%s-%s' % (rgrp_name_curr, rgrp_name_prev)]\n rot_base = rot_base.unsqueeze(dim=0).to(device)\n tsl_base = tsl_vec_base.unsqueeze(dim=0).to(device)\n \n rot_addi, tsl_addi = calc_angl_rot_tsl(angl[:, idx_rgrp])\n rot_curr, tsl_curr = merge_rot_tsl(\n rot_prev, tsl_prev, rot_base, tsl_base, rot_addi, tsl_addi)\n trans_dict[rgrp_name_curr] = (rot_curr, tsl_curr)\n\n fram_dict[rgrp_name_curr] = \\\n torch.cat([rot_curr, tsl_curr.unsqueeze(dim=1)], dim=1)\n fmsk_vec_dict[rgrp_name_curr] = fmsk[:, 0] * amsk[:, idx_rgrp]\n\n atom_names_sel = [x[0] for x in atom_infos_all if x[1] == idx_rgrp + 1]\n for atom_name_sel in atom_names_sel:\n cord_vec = self.cord_dict[resd_name][atom_name_sel].to(device)\n\n cord_dict[atom_name_sel] = \\\n tsl_curr + torch.sum(rot_curr * cord_vec.view(1, 1, 3), dim=2)\n cmsk_vec_dict[atom_name_sel] = fmsk_vec_dict[rgrp_name_curr]\n\n cmsk = torch.stack([cmsk_vec_dict[x] for x in atom_names_pad][:RNA_CONSTANTS.ATOM_NUM_MAX], dim=1)\n cord = torch.stack([cord_dict[x] for x in atom_names_pad][:RNA_CONSTANTS.ATOM_NUM_MAX], dim=1)\n\n return cord, cmsk\n\n def export_pdb_file(self, seq, atom_cords, path, atom_masks=None, confidence=None, chain_id=None, logger = None):\n \"\"\"Export a PDB file.\"\"\"\n\n # configurations\n i_code = ' '\n chain_id = '0' if chain_id is None else chain_id\n occupancy = 1.0\n cord_min = -999.0\n cord_max = 999.0\n seq_len = len(seq)\n\n n_key_atoms = RNA_CONSTANTS.ATOM_NUM_MAX\n\n # take all the atom coordinates as valid, if not specified\n if atom_masks is None:\n atom_masks = np.ones(atom_cords.shape[:-1], dtype=np.int8)\n\n # determine the set of atom names (per 
residue)\n if atom_cords.ndim == 2:\n if atom_cords.shape[0] == seq_len * n_key_atoms:\n atom_cords = np.reshape(atom_cords, [seq_len, n_key_atoms, 3])\n atom_masks = np.reshape(atom_masks, [seq_len, n_key_atoms])\n else:\n raise ValueError('atom coordinates\\' shape does not match the sequence length')\n\n elif atom_cords.ndim == 3:\n assert atom_cords.shape[0] == seq_len\n atom_cords = atom_cords\n atom_masks = atom_masks\n\n else:\n raise ValueError('atom coordinates must be a 2D or 3D np.ndarray')\n\n # reset invalid values in atom coordinates\n atom_cords = np.clip(atom_cords, cord_min, cord_max)\n atom_cords[np.isnan(atom_cords)] = 0.0\n atom_cords[np.isinf(atom_cords)] = 0.0\n\n # export the 3D structure to a PDB file\n os.makedirs(os.path.dirname(os.path.realpath(path)), exist_ok=True)\n with open(path, 'w') as o_file:\n n_atoms = 0\n for idx_resd, resd_name in enumerate(seq):\n for idx_atom, atom_name in enumerate(RNA_CONSTANTS.ATOM_NAMES_PER_RESD[resd_name]):\n\n temp_factor = 0.0 if confidence is None else \\\n float(100 * confidence.reshape([seq_len])[idx_resd - 1])\n\n if atom_masks[idx_resd, idx_atom] == 0:\n continue\n n_atoms += 1\n charge = atom_name[0]\n line_str = ''.join([\n 'ATOM ',\n '%5d' % n_atoms,\n ' ' + atom_name + ' ' * (3 - len(atom_name)),\n ' %s' % resd_name,\n ' %s' % chain_id,\n ' ' * (4 - len(str(idx_resd + 1))),\n '%s' % str(idx_resd + 1),\n '%s ' % i_code,\n '%8.3f' % atom_cords[idx_resd, idx_atom, 0],\n '%8.3f' % atom_cords[idx_resd, idx_atom, 1],\n '%8.3f' % atom_cords[idx_resd, idx_atom, 2],\n '%6.2f' % occupancy,\n '%6.2f' % temp_factor,\n ' ' * 10,\n '%2s' % charge,\n '%2s' % ' ',\n ])\n assert len(line_str) == 80, 'line length must be exactly 80 characters: ' + line_str\n o_file.write(line_str + '\\n')\n\n if logger is not None:\n logger.info(f' Export PDB file to {path}')" } ]
import math import torch import torch.nn as nn import torch.nn.functional as F from typing import Optional, Tuple, Sequence from rhofold.model.primitives import Linear, LayerNorm from rhofold.utils.rigid_utils import Rigid from rhofold.utils.tensor_utils import ( dict_multimap, permute_final_dims, flatten_final_dims, ) from einops import rearrange from rhofold.utils.alphabet import RNAAlphabet from rhofold.utils.converter import RNAConverter
9,729
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class RefineNet(nn.Module): """""" def __init__(self, dim = 64, is_pos_emb = True, n_layer = 4, enable = True, **kwargs): """Constructor function.""" super().__init__() self.is_pos_emb = is_pos_emb self.alphabet = RNAAlphabet.from_architecture('RNA') self.embed_tokens = nn.Embedding(len(self.alphabet), dim) self.enable = enable if self.is_pos_emb: self.embed_positions = PosEmbedding(4096, dim, self.alphabet.padding_idx) self.refine_layer0 = ResEGNN(corrections=n_layer, dims_in=dim) self.refine_layer1 = ResEGNN(corrections=n_layer, dims_in=dim) self.refine_layer2 = ResEGNN(corrections=n_layer, dims_in=dim) self.refine_layer3 = ResEGNN(corrections=n_layer, dims_in=dim) def forward(self, tokens, cords): """Perform the forward pass. Args: Returns: """ if not self.enable: return cords tokens = tokens[:, 0, :] tokens = tokens.unsqueeze(-1).repeat(1, 1, 23) b, l, n = tokens.shape cords = cords.reshape([b, l, n, 3]) fea = self.embed_tokens(tokens) b, l, n, _ = fea.shape if self.is_pos_emb: fea += self.embed_positions(tokens.reshape(b * l, n)).view(fea.size()) out = self.refine_layer0(fea.reshape([ b * l, n, -1]), cords.reshape([ b * l, n, -1]), is_fea = True) fea, cords = out[-1] fea = fea.reshape([b, l, n, -1]).transpose(1,2) cords = cords.reshape([b, l, n, -1]).transpose(1,2) out = self.refine_layer1(fea.reshape([ b * n, l, -1]), cords.reshape([ b * n, l, -1]), is_fea = True) fea, cords = out[-1] fea = fea.reshape([b, n, l, -1]).transpose(1,2) cords = cords.reshape([b, n, l, -1]).transpose(1,2) out = self.refine_layer2(fea.reshape([ b * l, n, -1]), cords.reshape([ b * l, n, -1]), is_fea = True) fea, cords = out[-1] fea = fea.reshape([b, l, n, -1]).transpose(1,2) cords = cords.reshape([b, l, n, -1]).transpose(1,2) out = self.refine_layer3(fea.reshape([ b * n, l, -1]), cords.reshape([ b * n, l, -1]), is_fea = True) fea, cords = out[-1] cords = cords.reshape([b, n, l, -1]).transpose(1,2) cords = cords.reshape([b, l * n, 3]) return cords class Swish_(torch.nn.Module): def forward(self, x): return x * x.sigmoid() SiLU = torch.nn.SiLU if hasattr(torch.nn, 'SiLU') else Swish_ class CoorsNorm(torch.nn.Module): def __init__(self, eps=1e-8): super().__init__() self.eps = eps
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class RefineNet(nn.Module): """""" def __init__(self, dim = 64, is_pos_emb = True, n_layer = 4, enable = True, **kwargs): """Constructor function.""" super().__init__() self.is_pos_emb = is_pos_emb self.alphabet = RNAAlphabet.from_architecture('RNA') self.embed_tokens = nn.Embedding(len(self.alphabet), dim) self.enable = enable if self.is_pos_emb: self.embed_positions = PosEmbedding(4096, dim, self.alphabet.padding_idx) self.refine_layer0 = ResEGNN(corrections=n_layer, dims_in=dim) self.refine_layer1 = ResEGNN(corrections=n_layer, dims_in=dim) self.refine_layer2 = ResEGNN(corrections=n_layer, dims_in=dim) self.refine_layer3 = ResEGNN(corrections=n_layer, dims_in=dim) def forward(self, tokens, cords): """Perform the forward pass. Args: Returns: """ if not self.enable: return cords tokens = tokens[:, 0, :] tokens = tokens.unsqueeze(-1).repeat(1, 1, 23) b, l, n = tokens.shape cords = cords.reshape([b, l, n, 3]) fea = self.embed_tokens(tokens) b, l, n, _ = fea.shape if self.is_pos_emb: fea += self.embed_positions(tokens.reshape(b * l, n)).view(fea.size()) out = self.refine_layer0(fea.reshape([ b * l, n, -1]), cords.reshape([ b * l, n, -1]), is_fea = True) fea, cords = out[-1] fea = fea.reshape([b, l, n, -1]).transpose(1,2) cords = cords.reshape([b, l, n, -1]).transpose(1,2) out = self.refine_layer1(fea.reshape([ b * n, l, -1]), cords.reshape([ b * n, l, -1]), is_fea = True) fea, cords = out[-1] fea = fea.reshape([b, n, l, -1]).transpose(1,2) cords = cords.reshape([b, n, l, -1]).transpose(1,2) out = self.refine_layer2(fea.reshape([ b * l, n, -1]), cords.reshape([ b * l, n, -1]), is_fea = True) fea, cords = out[-1] fea = fea.reshape([b, l, n, -1]).transpose(1,2) cords = cords.reshape([b, l, n, -1]).transpose(1,2) out = self.refine_layer3(fea.reshape([ b * n, l, -1]), cords.reshape([ b * n, l, -1]), is_fea = True) fea, cords = out[-1] cords = cords.reshape([b, n, l, -1]).transpose(1,2) cords = cords.reshape([b, l * n, 3]) return cords class Swish_(torch.nn.Module): def forward(self, x): return x * x.sigmoid() SiLU = torch.nn.SiLU if hasattr(torch.nn, 'SiLU') else Swish_ class CoorsNorm(torch.nn.Module): def __init__(self, eps=1e-8): super().__init__() self.eps = eps
self.fn = torch.nn.LayerNorm(1)
1
2023-11-01 10:29:08+00:00
12k
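The record above pairs a cropped prefix of RhoFold's refinement module with the gold continuation line self.fn = torch.nn.LayerNorm(1). A minimal sketch of a consistency check for such a record is shown below; it assumes the field semantics suggested by the column names (cropped_code is the prefix shown to the model, next_line is the gold continuation somewhere after it in all_code), which the dump itself does not state, and the example record is a trimmed, hypothetical illustration rather than the full row.

# Hypothetical sanity check for one record; field semantics are assumed from
# the column names (cropped_code / all_code / next_line), not confirmed here.
def check_record(record: dict) -> bool:
    """Return True if next_line plausibly continues cropped_code inside all_code."""
    prefix = record["cropped_code"].rstrip()
    full = record["all_code"]
    gold = record["next_line"].strip()
    # The gold line should appear in the full file at all ...
    if gold not in full:
        return False
    # ... and, under the assumed cropping scheme, somewhere after the prefix tail.
    cut = full.find(prefix[-200:]) if prefix else 0  # align on the last chunk of the prefix
    return cut != -1 and gold in full[cut:]

# Trimmed example mirroring the row above (not the full field contents).
example = {
    "cropped_code": "class CoorsNorm(torch.nn.Module):\n    def __init__(self, eps=1e-8):\n        super().__init__()\n        self.eps = eps",
    "all_code": "class CoorsNorm(torch.nn.Module):\n    def __init__(self, eps=1e-8):\n        super().__init__()\n        self.eps = eps\n        self.fn = torch.nn.LayerNorm(1)",
    "next_line": "self.fn = torch.nn.LayerNorm(1)",
}
print(check_record(example))  # True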
ziqi-zhang/TAOISM
python/tensor_loader.py
[ { "identifier": "str_hash", "path": "python/utils/basic_utils.py", "snippet": "def str_hash(s):\n return int(int(hashlib.sha224(s.encode('utf-8')).hexdigest(), 16) % ((1 << 62) - 1))" }, { "identifier": "EnclaveInterface", "path": "python/enclave_interfaces.py", "snippet": "class EnclaveInterface(object):\n EidT = c_uint64\n IdT = c_ulonglong\n TaskIdT = c_uint64\n ArrToEnclaveT = POINTER(c_float)\n ArrEncryptedT = POINTER(c_ubyte)\n SGXLIB = \"App/bin/enclave_bridge.so\"\n DNNLIB = \"lib/sgxdnn.so\"\n\n tag_to_names = {}\n\n def __init__(self):\n import os\n cwd = os.getcwd()\n\n self.eid = None\n self.batchnormID = None\n self.lib = cdll.LoadLibrary(self.SGXLIB)\n\n self.lib.initialize_enclave.restype = self.EidT\n self.lib.destroy_enclave.argtypes = [self.EidT]\n self.lib.CalcEncNeededInByte.argtypes = [c_uint]\n self.lib.CalcEncNeededInByte.restype = c_int\n self.lib.AesEncryptTensor.argtypes = [self.ArrToEnclaveT, c_uint, self.ArrEncryptedT]\n self.lib.AesDecryptTensor.argtypes = [self.ArrEncryptedT, c_uint, self.ArrToEnclaveT]\n self.lib.InitTensor.argtypes = [self.EidT, self.IdT] + [c_int] * 4\n self.lib.SetTen.argtypes = [self.EidT, self.IdT, self.ArrToEnclaveT]\n self.lib.GetTen.argtypes = [self.EidT, self.IdT, self.ArrToEnclaveT]\n self.lib.SetSeed.argtypes = [self.EidT, self.IdT, c_ulonglong]\n self.lib.GetRandom.argtypes = [self.EidT, self.IdT, self.ArrToEnclaveT, c_ulonglong]\n self.lib.AsyncGetRandom.argtypes = self.lib.GetRandom.argtypes\n self.lib.AsyncGetRandom.restype = self.TaskIdT\n self.lib.SgdUpdate.argtypes = [self.EidT] + [self.IdT] * 3 + [c_float] * 4 + [c_bool] * 2\n self.lib.AsyncSgdUpdate.argtypes = [self.EidT] + [self.IdT] * 3 + [c_float] * 4 + [c_bool] * 2\n self.lib.AsyncSgdUpdate.restype = self.TaskIdT\n self.lib.GetTaskStatus.argtypes = [self.TaskIdT]\n self.lib.GetTaskStatus.restype = c_int\n self.lib.AddFromCpu.argtypes = [self.EidT, self.ArrToEnclaveT, self.IdT]\n self.lib.AsyncTask.argtypes = [self.EidT,\n self.IdT, self.ArrToEnclaveT, c_uint64, c_uint64,\n self.IdT, self.ArrToEnclaveT, c_uint64, c_uint64,\n self.IdT, self.ArrToEnclaveT, c_uint64, c_uint64,\n self.IdT, self.ArrToEnclaveT, c_uint64, c_uint64]\n self.lib.ReLUfunction.argtypes = [self.EidT, self.IdT, self.IdT, c_uint64]\n self.lib.QuantReLUfunction.argtypes = [self.EidT, self.IdT, self.IdT, c_uint64, c_float, c_float, c_uint8]\n self.lib.ReLUbackward.argtypes = [self.EidT, self.IdT, self.IdT, self.IdT, c_uint64]\n self.lib.InitMaxpool.argtypes = [self.EidT, self.IdT, self.IdT, self.IdT]\n self.lib.Maxpoolfunction.argtypes = [self.EidT, self.IdT, self.IdT, self.IdT] + [c_uint32] * 12\n self.lib.Maxpoolbackwardfunction.argtypes = [self.EidT, self.IdT, self.IdT, self.IdT] + [c_uint32] * 10\n self.lib.InitBatchnorm.argtypes = [self.EidT] + [self.IdT] * 10 + [c_uint32] * 4 + [c_int] * 2 + [c_float] * 2\n self.lib.BatchnormForward.argtypes = [self.EidT, self.IdT, c_int]\n self.lib.BatchnormBackward.argtypes = [self.EidT, self.IdT]\n self.lib.StochasticQuantize.argtypes = [self.EidT, c_uint64, c_uint64, c_uint64]\n self.lib.AsyncStochasticQuantize.argtypes = self.lib.StochasticQuantize.argtypes\n self.lib.AsyncStochasticQuantize.restype = c_int\n\n self.lib.InitSGXLinear.argtypes = [self.EidT] + [self.IdT] * 5 + [c_uint32] * 3\n self.lib.SGXLinearForward.argtypes = [self.EidT, self.IdT]\n self.lib.InitSGXConv.argtypes = [self.EidT] + [self.IdT] * 5 + [c_uint32] * 10\n self.lib.SGXConvForward.argtypes = [self.EidT, self.IdT]\n\n self.deployed_name_seed = defaultdict(list)\n\n def 
init_enclave(self):\n self.eid = self.lib.initialize_enclave()\n # print(f\"Eid {self.eid}\")\n\n def destroy_enclave(self):\n self.lib.destroy_enclave(self.get_eid())\n\n def get_eid(self):\n if self.eid is None:\n raise ValueError(\"Eid is None\")\n return self.eid\n\n def set_eid(self, eid):\n self.eid = eid\n\n def name_modifier(self, name):\n return name\n\n def get_tag(self, name, remap=True):\n tag = str_hash(self.name_modifier(name))\n if tag not in self.tag_to_names:\n self.tag_to_names[tag] = self.name_modifier(name)\n if remap:\n return GlobalTensor.get_remapped_tags(tag)\n else:\n return tag\n\n def calc_enc_needed_bytes(self, NumBtye):\n return self.lib.CalcEncNeededInByte(NumBtye)\n\n def create_encrypt_torch(self, shape):\n NeededNumBtype = self.calc_enc_needed_bytes(get_prod(shape))\n return torch.zeros(NeededNumBtype).type(torch.uint8)\n\n def aes_encrypt(self, plain_tensor, enc_tensor):\n self.lib.AesEncryptTensor(get_float_ptr(plain_tensor), get_prod(plain_tensor.size()),\n get_encryption_ptr(enc_tensor))\n\n def aes_decrypt(self, enc_tensor, plain_tensor):\n self.lib.AesDecryptTensor(get_encryption_ptr(enc_tensor), get_prod(plain_tensor.size()),\n get_float_ptr(plain_tensor))\n\n def init_tensor_unsafe(self, tag, size):\n GlobalTensor.init_enclave_tensor(tag, size)\n\n def init_enclave_tensor(self, name, size):\n self.init_tensor_unsafe(self.get_tag(name), size)\n # GlobalTensor.InitEnclaveTensor(self.GetTag(name), size)\n # self.lib.InitTensor(self.GetEid(), self.GetTag(name), size[0], size[1], size[2], size[3])\n\n def set_tensor_unsafe(self, tag, tensor):\n # if len(tensor.shape)>1:\n # print(f\"Set tag {tag}, \", tensor[0,:10])\n self.lib.SetTen(self.get_eid(), tag, get_float_ptr(tensor))\n\n def set_tensor(self, name, tensor):\n # if len(tensor.shape)>1:\n # print(f\"Set {name}, \", tensor[0,:10])\n self.set_tensor_unsafe(self.get_tag(name), tensor)\n # self.lib.SetTen(self.GetEid(), self.GetTag(name), GetFloatPtr(tensor))\n\n def set_enclave_tensor(self, name, tensor):\n self.set_tensor(name, tensor)\n\n def get_tensor(self, name, tensor):\n self.lib.GetTen(self.get_eid(), self.get_tag(name), get_float_ptr(tensor))\n\n def get_enclave_tensor(self, name, tensor):\n self.get_tensor(name, tensor)\n\n def set_seed(self, name, seed):\n name_tag = self.get_tag(name)\n seed_tag = self.get_tag(seed, remap=False)\n self.deployed_name_seed[name_tag].append(seed_tag)\n self.lib.SetSeed(self.get_eid(), name_tag, seed_tag)\n\n def get_validated_name_seed_tag(self, name, seed):\n name_tag = self.get_tag(name)\n seed_tag = self.get_tag(seed, remap=False)\n if seed_tag not in self.deployed_name_seed[name_tag]:\n raise ValueError(f\"Not existing name seed tag pair: name_tag: {name_tag}, seed_tag: {seed_tag}\")\n return name_tag, seed_tag\n\n def get_random(self, name, tensor):\n name_tag, seed_tag = self.get_validated_name_seed_tag(name, name)\n self.lib.GetRandom(self.get_eid(), name_tag, get_float_ptr(tensor), seed_tag)\n\n def enclave_add_from_cpu(self, src, dst_name):\n if isinstance(src, str):\n src_ptr = get_float_ptr(self.get_cpu(src))\n elif isinstance(src, torch.tensor):\n src_ptr = get_float_ptr(src)\n else:\n raise ValueError(\"src has to be str or troch.tensor\")\n self.lib.AddFromCpu(self.get_eid(), src_ptr, self.get_tag(dst_name))\n\n def get_task_status(self, task_id):\n # return True if the task is finished\n # return False otherwise\n res = self.lib.GetTaskStatus(task_id)\n return res == 1\n\n def wait_tasks(self, task_ids):\n while len(task_ids) > 0:\n 
to_be_removed = []\n for task_id in task_ids:\n status = self.get_task_status(task_id)\n if status:\n to_be_removed.append(task_id)\n for task_id in to_be_removed:\n task_ids.remove(task_id)\n\n def async_get_random(self, name, tensor, seed=\"\"):\n if seed == \"\":\n seed = name\n return self.lib.AsyncGetRandom(self.get_eid(), self.get_tag(name),\n get_float_ptr(tensor), self.get_tag(seed, remap=False))\n\n def async_task(self, name1, arr1, seed1, name2, arr2, seed2, name3, arr3, seed3, name4, arr4, seed4):\n def get_size(t):\n return np.prod(t.size())\n\n print(get_size(arr1))\n self.lib.AsyncTask(self.get_eid(),\n self.get_tag(name1), get_float_ptr(arr1), get_size(arr1), self.get_tag(seed1, remap=False),\n self.get_tag(name2), get_float_ptr(arr2), get_size(arr2), self.get_tag(seed2, remap=False),\n self.get_tag(name3), get_float_ptr(arr3), get_size(arr3), self.get_tag(seed3, remap=False),\n self.get_tag(name4), get_float_ptr(arr4), get_size(arr4), self.get_tag(seed4, remap=False),\n )\n\n def relunew(self, namein, nameout, sizelist):\n self.lib.ReLUfunction(self.get_eid(), self.get_tag(namein), self.get_tag(nameout),\n np.prod(sizelist))\n \n def quant_relunew(self, namein, nameout, sizelist, scale, v_min, zero):\n self.lib.QuantReLUfunction(self.get_eid(), self.get_tag(namein), self.get_tag(nameout),\n np.prod(sizelist), scale, v_min, zero)\n\n def relubackward(self, nameout, namedout, namedin, sizelist):\n self.lib.ReLUbackward(self.get_eid(), self.get_tag(nameout), self.get_tag(namedout), self.get_tag(namedin),\n np.prod(sizelist))\n\n def roundup8(self, number):\n return ((number + 7) & -(8))\n\n def maxpoolinit(self, layer_name, name_in_trans, name_out_trans):\n return self.lib.InitMaxpool(self.get_eid(), self.get_tag(layer_name), self.get_tag(name_in_trans), self.get_tag(name_out_trans))\n\n def maxpoolnew(self, layer_name, namein, nameout, sizelist, outputheight, outputwidth, filterh, filterw, rowstride, colstride,\n rowpad, colpad):\n if rowstride is None and colstride is None and rowpad is None and colpad is None:\n self.lib.Maxpoolfunction(self.get_eid(), self.get_tag(layer_name), self.get_tag(namein), self.get_tag(nameout), sizelist[0],\n sizelist[1], sizelist[2], sizelist[3], outputheight, outputwidth, filterh, filterw,\n filterh, filterw, 0, 0)\n elif rowstride is None and colstride is None and rowpad is not None and colpad is not None:\n self.lib.Maxpoolfunction(self.get_eid(), self.get_tag(layer_name), self.get_tag(namein), self.get_tag(nameout), sizelist[0],\n sizelist[1], sizelist[2], sizelist[3], outputheight, outputwidth, filterh, filterw,\n filterh, filterw, rowpad, colpad)\n elif rowstride is not None and colstride is not None and rowpad is None and colpad is None:\n self.lib.Maxpoolfunction(self.get_eid(), self.get_tag(layer_name), self.get_tag(namein), self.get_tag(nameout), sizelist[0],\n sizelist[1], sizelist[2], sizelist[3], outputheight, outputwidth, filterh, filterw,\n rowstride, colstride, 0, 0)\n else:\n self.lib.Maxpoolfunction(self.get_eid(), self.get_tag(layer_name), self.get_tag(namein), self.get_tag(nameout), sizelist[0],\n sizelist[1], sizelist[2], sizelist[3], outputheight, outputwidth, filterh, filterw,\n rowstride, colstride, rowpad, colpad)\n\n def maxpoolback(self, layer_name, namedout, namedin, sizelist, outputheight, outputwidth, filterh, filterw, rowstride,\n colstride, rowpad, colpad):\n if rowstride is None and colstride is None and rowpad is None and colpad is None:\n self.lib.Maxpoolbackwardfunction(self.get_eid(), 
self.get_tag(layer_name), self.get_tag(namedout), self.get_tag(namedin), sizelist[0],\n sizelist[1], sizelist[2], sizelist[3], outputheight, outputwidth, filterh,\n filterw, filterh, filterw)\n elif rowstride is None and colstride is None and rowpad is not None and colpad is not None:\n self.lib.Maxpoolbackwardfunction(self.get_eid(), self.get_tag(layer_name), self.get_tag(namedout), self.get_tag(namedin), sizelist[0],\n sizelist[1], sizelist[2] + rowpad, sizelist[3] + colpad, outputheight,\n outputwidth, filterh, filterw, filterh, filterw)\n elif rowstride is not None and colstride is not None and rowpad is None and colpad is None:\n self.lib.Maxpoolbackwardfunction(self.get_eid(), self.get_tag(layer_name), self.get_tag(namedout), self.get_tag(namedin), sizelist[0],\n sizelist[1], sizelist[2], sizelist[3], outputheight, outputwidth, filterh,\n filterw, rowstride, colstride)\n else:\n self.lib.Maxpoolbackwardfunction(self.get_eid(), self.get_tag(layer_name), self.get_tag(namedout), self.get_tag(namedin), sizelist[0],\n sizelist[1], sizelist[2] + rowpad, sizelist[3] + colpad, outputheight,\n outputwidth, filterh, filterw, rowstride, colstride)\n\n def batchnorm_init(self, layer_name,\n input_name, output_name, gamma_name, beta_name,\n # der_input_name, der_output_name, der_gamma_name, der_beta_name,\n run_mean_name, run_var_name, cur_mean_name, cur_var_name,\n mu_name,\n batch_size, num_channel, img_h, img_w,\n is_affine, is_cumulative, momentum, epsilon):\n \n self.lib.InitBatchnorm(\n self.get_eid(), \n self.get_tag(layer_name), self.get_tag(input_name), self.get_tag(output_name), self.get_tag(gamma_name), self.get_tag(beta_name),\n self.get_tag(run_mean_name), self.get_tag(run_var_name), self.get_tag(cur_mean_name), self.get_tag(cur_var_name),\n self.get_tag(mu_name),\n batch_size, num_channel, img_h, img_w,\n is_affine, is_cumulative, momentum, epsilon)\n\n def batchnorm_forward(self, layer_name, training):\n self.lib.BatchnormForward(self.get_eid(), self.get_tag(layer_name), int(training))\n\n def batchnorm_backward(self, layer_name):\n self.lib.BatchnormBackward(self.get_eid(), self.get_tag(layer_name))\n\n def async_masking_c01(self, store_name, main_seed, seed0, seed1, dst_tensor):\n return self.lib.AsyncMaskingC01(self.get_eid(), self.get_tag(store_name),\n self.get_tag(main_seed, remap=False),\n self.get_tag(seed0, remap=False),\n self.get_tag(seed1, remap=False),\n get_float_ptr(dst_tensor))\n\n def sgd_update(self, param_name=required, grad_name=required, momentum_name=None,\n lr=None, momentum=0, weight_decay=0, dampening=0, nesterov=False,\n first_momentum=False, is_async=True):\n if param_name is required:\n raise ValueError(\"param_name is required\")\n if grad_name is required:\n raise ValueError(\"grad_name is required\")\n if not (0 <= momentum <= 1):\n raise ValueError(\"momentum has to in [0, 1]\")\n if 0 < momentum < 1 and momentum_name is None:\n raise ValueError(\"momentum name cannot be None\")\n if not (0 <= weight_decay <= 1):\n raise ValueError(\"momentum has to in [0, 1]\")\n if lr is None or lr < 0:\n raise ValueError(\"learning rate has to be positive\")\n if momentum_name is None:\n raise NotImplementedError\n if dampening != 0:\n raise NotImplementedError\n if nesterov:\n raise NotImplementedError\n\n param_tag = self.get_tag(param_name)\n grad_tag = self.get_tag(grad_name)\n momentum_tag = self.get_tag(momentum_name) if momentum_name is not None else 0\n\n func = self.lib.AsyncSgdUpdate if is_async else self.lib.SgdUpdate\n\n return func(self.get_eid(), 
param_tag, grad_tag, momentum_tag,\n lr, momentum, weight_decay, dampening, nesterov, first_momentum)\n\n def quantize(self, src_name, dst_name, q_tag, is_async=True):\n if is_async:\n func = self.lib.AsyncStochasticQuantize\n else:\n func = self.lib.StochasticQuantize\n return func(self.get_eid(), self.get_tag(src_name), self.get_tag(dst_name), self.get_tag(q_tag, remap=False))\n\n def sgx_linear_init(\n self, layer_name, \n input_name, output_name, weight_name, bias_name,\n # der_input_name, der_output_name, der_weight_name, der_bias_name,\n batch_size, input_size, output_size,\n ):\n self.lib.InitSGXLinear(\n self.get_eid(), self.get_tag(layer_name),\n self.get_tag(input_name), self.get_tag(output_name), self.get_tag(weight_name), self.get_tag(bias_name), \n # self.get_tag(der_input_name), self.get_tag(der_output_name), self.get_tag(der_weight_name), self.get_tag(der_bias_name), \n batch_size, input_size, output_size,\n )\n\n def sgx_linear_forward(self, layer_name):\n self.lib.SGXLinearForward(\n self.get_eid(), self.get_tag(layer_name),\n )\n\n def sgx_conv_init(\n self, layer_name, \n input_name, output_name, weight_name, bias_name,\n # der_input_name, der_output_name, der_weight_name, der_bias_name,\n batch_size, input_h, input_w, input_c, output_h, output_w, output_c, \n kernel, padding, stride,\n ):\n self.lib.InitSGXConv(\n self.get_eid(), self.get_tag(layer_name),\n self.get_tag(input_name), self.get_tag(output_name), self.get_tag(weight_name), self.get_tag(bias_name),\n # self.get_tag(der_input_name), self.get_tag(der_output_name), self.get_tag(der_weight_name), self.get_tag(der_bias_name),\n batch_size, input_h, input_w, input_c, output_h, output_w, output_c,\n kernel, padding, stride,\n )\n\n def sgx_conv_forward(self, layer_name):\n self.lib.SGXConvForward(\n self.get_eid(), self.get_tag(layer_name),\n )\n\n @staticmethod\n def print_tensor_link_relation():\n print(\"=\"*30, \"Print Tensor Link Relation\", \"=\"*30)\n for tag, friends in GlobalTensor.InverseLinkedTags.items():\n if \"Der\" in EnclaveInterface.tag_to_names[tag]:\n continue\n friend_info = \"[\"\n for f in friends:\n friend_info += EnclaveInterface.tag_to_names[f]\n friend_info += \", \"\n friend_info += \"]\"\n print(EnclaveInterface.tag_to_names[tag], \": \", friend_info)\n print(\"=\"*60)" }, { "identifier": "GlobalTensor", "path": "python/enclave_interfaces.py", "snippet": "class GlobalTensor(object):\n cpu_tensor = {}\n gpu_tensors = {}\n encrypted_tensors = {}\n LinkedTags = {}\n InverseLinkedTags = {}\n IsInitEnclaveTensor = {}\n EnclaveInterface = None\n eid = None\n is_init_global_tensor = False\n\n @staticmethod\n def init():\n if GlobalTensor.is_init_global_tensor:\n return\n GlobalTensor.EnclaveInterface = EnclaveInterface()\n GlobalTensor.EnclaveInterface.init_enclave()\n GlobalTensor.is_init_global_tensor = True\n\n @staticmethod\n def destroy():\n GlobalTensor.EnclaveInterface.destroy_enclave()\n\n GlobalTensor.cpu_tensor = {}\n GlobalTensor.gpu_tensors = {}\n GlobalTensor.encrypted_tensors = {}\n GlobalTensor.LinkedTags = {}\n GlobalTensor.InverseLinkedTags = {}\n GlobalTensor.IsInitEnclaveTensor = {}\n GlobalTensor.EnclaveInterface = None\n GlobalTensor.eid = None\n GlobalTensor.is_init_global_tensor = False\n\n\n @staticmethod\n def get_eid():\n return GlobalTensor.EnclaveInterface.get_eid()\n\n @staticmethod\n def link_tags(tag1, tag2):\n if tag1 == tag2:\n return\n\n friends = []\n\n def add_friends(tag):\n nonlocal friends\n if tag in GlobalTensor.LinkedTags:\n its_leader_tag = 
GlobalTensor.LinkedTags[tag]\n if its_leader_tag in GlobalTensor.InverseLinkedTags:\n friends += GlobalTensor.InverseLinkedTags.pop(its_leader_tag)\n else:\n friends += [tag]\n\n add_friends(tag1)\n add_friends(tag2)\n leader_tag = min(friends)\n\n GlobalTensor.InverseLinkedTags[leader_tag] = friends\n for t in friends:\n if t in GlobalTensor.IsInitEnclaveTensor:\n raise ValueError(\"Tags must linked before tensor initialization\")\n GlobalTensor.LinkedTags[t] = leader_tag\n\n @staticmethod\n def get_remapped_tags(tag):\n return GlobalTensor.LinkedTags[tag] if tag in GlobalTensor.LinkedTags else tag\n\n @staticmethod\n def set_cpu(tag, tensor):\n GlobalTensor.cpu_tensor[tag] = tensor.to(torch.device(\"cpu\"))\n\n @staticmethod\n def set_gpu(tag, tensor):\n GlobalTensor.gpu_tensors[tag] = tensor\n\n @staticmethod\n def set_encrypted(tag, tensor):\n GlobalTensor.encrypted_tensors[tag] = tensor\n\n @staticmethod\n def get_cpu(tag):\n return GlobalTensor.cpu_tensor[tag]\n\n @staticmethod\n def get_gpu(tag):\n return GlobalTensor.gpu_tensors[tag]\n\n @staticmethod\n def get_encryption(tag):\n return GlobalTensor.encrypted_tensors[tag]\n\n @staticmethod\n def init_enclave_tensor(tag, size):\n size = list(size)\n if len(size) < 4:\n size = [1] * (4 - len(size)) + size\n remapped_tag = GlobalTensor.get_remapped_tags(tag)\n if remapped_tag in GlobalTensor.IsInitEnclaveTensor:\n return\n else:\n GlobalTensor.IsInitEnclaveTensor[remapped_tag] = True\n eid = GlobalTensor.get_eid()\n GlobalTensor.EnclaveInterface.lib.InitTensor(eid, remapped_tag, size[0], size[1], size[2], size[3])\n\n @staticmethod\n def init_encrypted_tensor(tag, shape):\n GlobalTensor.encrypted_tensors[GlobalTensor.get_remapped_tags(tag)] = \\\n GlobalTensor.EnclaveInterface.create_encrypt_torch(shape)" }, { "identifier": "SecretConfig", "path": "python/global_config.py", "snippet": "class SecretConfig(object):\n worldSize = 3\n PrimeLimit = (1 << 21) - 9\n dtypeForCpuMod = torch.float32\n dtypeForCudaMm = torch.float64\n dtypeForCpuOp = torch.float32\n dtypeForSave = torch.float32\n stateless_logfile = \"stateless.log\"\n stateless_logger_name = \"stateless_logger\"\n is_comptue_gpu = True" } ]
from types import MethodType from pdb import set_trace as st from python.utils.basic_utils import str_hash from python.enclave_interfaces import EnclaveInterface, GlobalTensor from python.global_config import SecretConfig import torch
7,239
self.sid = -1 self.tensor_name_list = [] self.encryption_tensor_name_list = {} self.RandomVarName = None self.ShareVarName = None self.ShareTuple = None def init(self, start_enclave=True): if start_enclave: print("Initializing sid: %d" % self.sid) self.init_enclave() self.generate_tensor_name_list() # if hasattr(self, "LayerName") and self.LayerName == "Layer1.0.main.relu2": # st() self.init_enclave_tensors() self.init_cpu_tensor() self.init_encryption_tensor() def generate_tensor_name_list(self, force=False): return def link_tensors(self): pass def init_enclave_tensors(self): self.generate_tensor_name_list() for TensorName, shape, SeedList in self.tensor_name_list: if shape is None: raise ValueError("The shape is None. Please setup the shape before init_enclave_tensor") # print(f"TensorLoader init {TensorName}, {shape}") self.init_enclave_tensor(TensorName, shape) if SeedList is None: continue for seed in SeedList: self.set_seed(TensorName, seed) def set_cpu(self, name, t): # print("---", name, self.get_tag(name)) GlobalTensor.set_cpu(self.get_tag(name), t) def set_gpu(self, name, t): GlobalTensor.set_gpu(self.get_tag(name), t) def set_encryption(self, name, t): GlobalTensor.set_encryption(self.get_tag(name), t) def get_cpu(self, name): return GlobalTensor.get_cpu(self.get_tag(name)) def get_gpu(self, name): return GlobalTensor.get_gpu(self.get_tag(name)) def get_encryption(self, name): return GlobalTensor.get_encryption(self.get_tag(name)) def generate_cpu_tensor(self, name, shape): self.set_cpu(name, torch.zeros(shape).type(SecretConfig.dtypeForCpuOp)) # self.CpuTensors[name] = torch.zeros(shape).type(SecretConfig.dtypeForCpuOp) def transfer_cpu_to_gpu(self, name): self.set_gpu(name, self.get_cpu(name).cuda(non_blocking=True).type(SecretConfig.dtypeForCudaMm)) # self.GpuTensors[name] = self.CpuTensors[name].cuda(non_blocking=True).type(SecretConfig.dtypeForCudaMm) def transfer_gpu_to_cpu(self, name): cpu_tensor = self.get_cpu(name) gpu_tensor = self.get_gpu(name) cpu_tensor.copy_(gpu_tensor.type(SecretConfig.dtypeForCpuOp)) def transfer_enclave_to_cpu(self, name): self.from_enclave(name, self.get_cpu(name)) def transfer_cpu_to_enclave(self, name): self.set_tensor(name, self.get_cpu(name)) def init_cpu_tensor(self): self.generate_tensor_name_list() for TensorName, shape, _ in self.tensor_name_list: self.generate_cpu_tensor(TensorName, shape) def init_encryption_tensor(self): self.generate_tensor_name_list() for name, shape in self.encryption_tensor_name_list: GlobalTensor.init_encrypted_tensor(self.get_tag(name), shape) # self.EncrtyptedTensors[name] = self.CreateEncryptTorch(shape) def set_tensor_cpu_enclave(self, name, tensor): # GlobalTensor.SetNamedTensor(self.GetTag(tag), tensor) self.set_cpu(name, tensor) self.set_tensor(name, tensor) # print("Set cpu enclave: ", tensor[0,:10]) def set_tensor_cpu_gpu_enclave(self, name, tensor): # GlobalTensor.SetNamedTensor(self.GetTag(tag), tensor) self.set_cpu(name, tensor) self.set_tensor(name, tensor) self.set_gpu(name, tensor) # print("Set cpu enclave: ", tensor[0,:10]) def from_enclave(self, name, tensor): self.get_tensor(name, tensor) # def generate_enclave_tensor(self, name): # if name in self.RandomVarName: # return self.async_get_random(name, self.get_cpu(name)) # elif name in self.ShareVarName: # original, seed = self.ShareTuple[name] # return self.async_get_share(original, self.get_cpu(name), seed) # else: # raise Exception("Doesnt how to generate this tensor") def tensor_loader_factory(sid, tensor_loader_name): GlobalTensor.init() 
tensor_loader = TensorLoader() tensor_loader.Name = tensor_loader_name
class TensorLoader(EnclaveInterface): def __init__(self): super().__init__() self.sid = -1 self.tensor_name_list = [] self.encryption_tensor_name_list = {} self.RandomVarName = None self.ShareVarName = None self.ShareTuple = None def init(self, start_enclave=True): if start_enclave: print("Initializing sid: %d" % self.sid) self.init_enclave() self.generate_tensor_name_list() # if hasattr(self, "LayerName") and self.LayerName == "Layer1.0.main.relu2": # st() self.init_enclave_tensors() self.init_cpu_tensor() self.init_encryption_tensor() def generate_tensor_name_list(self, force=False): return def link_tensors(self): pass def init_enclave_tensors(self): self.generate_tensor_name_list() for TensorName, shape, SeedList in self.tensor_name_list: if shape is None: raise ValueError("The shape is None. Please setup the shape before init_enclave_tensor") # print(f"TensorLoader init {TensorName}, {shape}") self.init_enclave_tensor(TensorName, shape) if SeedList is None: continue for seed in SeedList: self.set_seed(TensorName, seed) def set_cpu(self, name, t): # print("---", name, self.get_tag(name)) GlobalTensor.set_cpu(self.get_tag(name), t) def set_gpu(self, name, t): GlobalTensor.set_gpu(self.get_tag(name), t) def set_encryption(self, name, t): GlobalTensor.set_encryption(self.get_tag(name), t) def get_cpu(self, name): return GlobalTensor.get_cpu(self.get_tag(name)) def get_gpu(self, name): return GlobalTensor.get_gpu(self.get_tag(name)) def get_encryption(self, name): return GlobalTensor.get_encryption(self.get_tag(name)) def generate_cpu_tensor(self, name, shape): self.set_cpu(name, torch.zeros(shape).type(SecretConfig.dtypeForCpuOp)) # self.CpuTensors[name] = torch.zeros(shape).type(SecretConfig.dtypeForCpuOp) def transfer_cpu_to_gpu(self, name): self.set_gpu(name, self.get_cpu(name).cuda(non_blocking=True).type(SecretConfig.dtypeForCudaMm)) # self.GpuTensors[name] = self.CpuTensors[name].cuda(non_blocking=True).type(SecretConfig.dtypeForCudaMm) def transfer_gpu_to_cpu(self, name): cpu_tensor = self.get_cpu(name) gpu_tensor = self.get_gpu(name) cpu_tensor.copy_(gpu_tensor.type(SecretConfig.dtypeForCpuOp)) def transfer_enclave_to_cpu(self, name): self.from_enclave(name, self.get_cpu(name)) def transfer_cpu_to_enclave(self, name): self.set_tensor(name, self.get_cpu(name)) def init_cpu_tensor(self): self.generate_tensor_name_list() for TensorName, shape, _ in self.tensor_name_list: self.generate_cpu_tensor(TensorName, shape) def init_encryption_tensor(self): self.generate_tensor_name_list() for name, shape in self.encryption_tensor_name_list: GlobalTensor.init_encrypted_tensor(self.get_tag(name), shape) # self.EncrtyptedTensors[name] = self.CreateEncryptTorch(shape) def set_tensor_cpu_enclave(self, name, tensor): # GlobalTensor.SetNamedTensor(self.GetTag(tag), tensor) self.set_cpu(name, tensor) self.set_tensor(name, tensor) # print("Set cpu enclave: ", tensor[0,:10]) def set_tensor_cpu_gpu_enclave(self, name, tensor): # GlobalTensor.SetNamedTensor(self.GetTag(tag), tensor) self.set_cpu(name, tensor) self.set_tensor(name, tensor) self.set_gpu(name, tensor) # print("Set cpu enclave: ", tensor[0,:10]) def from_enclave(self, name, tensor): self.get_tensor(name, tensor) # def generate_enclave_tensor(self, name): # if name in self.RandomVarName: # return self.async_get_random(name, self.get_cpu(name)) # elif name in self.ShareVarName: # original, seed = self.ShareTuple[name] # return self.async_get_share(original, self.get_cpu(name), seed) # else: # raise Exception("Doesnt how to generate this 
tensor") def tensor_loader_factory(sid, tensor_loader_name): GlobalTensor.init() tensor_loader = TensorLoader() tensor_loader.Name = tensor_loader_name
tensor_loader.LayerId = str_hash(tensor_loader.Name)
0
2023-11-01 10:37:37+00:00
12k
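For the record above, the gold next line tensor_loader.LayerId = str_hash(tensor_loader.Name) depends on the str_hash snippet, and gold_snippet_index is 0. The sketch below shows one plausible reading of that field, namely an index into the record's context list selecting the snippet the gold line relies on; this interpretation is an assumption drawn from the column names, and the example record is a reduced, hypothetical stand-in for the full row.

# Assumed interpretation of gold_snippet_index: position of the relevant
# context snippet in the record's context list. Not defined by the dump itself.
import hashlib

def str_hash(s: str) -> int:
    # Copied from the record's own str_hash context snippet.
    return int(int(hashlib.sha224(s.encode("utf-8")).hexdigest(), 16) % ((1 << 62) - 1))

def gold_context(record: dict) -> dict:
    """Return the context snippet referenced by gold_snippet_index."""
    return record["context"][record["gold_snippet_index"]]

# Reduced, hypothetical version of the row above.
record = {
    "context": [
        {"identifier": "str_hash", "path": "python/utils/basic_utils.py"},
        {"identifier": "EnclaveInterface", "path": "python/enclave_interfaces.py"},
    ],
    "gold_snippet_index": 0,
    "next_line": "tensor_loader.LayerId = str_hash(tensor_loader.Name)",
}
print(gold_context(record)["identifier"])  # str_hash
print(str_hash("demo_loader"))             # deterministic id below 2**62 - 1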
NVlabs/M2T2
m2t2/m2t2.py
[ { "identifier": "ActionDecoder", "path": "m2t2/action_decoder.py", "snippet": "class ActionDecoder(torch.nn.Module):\n def __init__(\n self, mask_dim, use_embed, embed_dim, max_num_pred, num_params,\n hidden_dim, num_layers, activation, offset_bins\n ):\n super(ActionDecoder, self).__init__()\n feat_dim = mask_dim\n if use_embed:\n feat_dim += embed_dim\n self.feat_dim = feat_dim\n self.use_embed = use_embed\n self.contact_dir_head = MLP(\n feat_dim, hidden_dim, 3, num_layers, activation\n )\n self.approach_dir_head = MLP(\n feat_dim, hidden_dim, 3, num_layers, activation\n )\n self.offset_head = MLP(\n feat_dim, hidden_dim, len(offset_bins) - 1,\n num_layers, activation\n )\n offset_bins = torch.tensor(offset_bins).float()\n self.offset_vals = (offset_bins[:-1] + offset_bins[1:]) / 2\n self.max_num_pred = max_num_pred\n self.param_head, self.release_head = None, None\n if num_params > 0:\n self.param_head = MLP(\n embed_dim, hidden_dim, num_params, num_layers, activation\n )\n self.release_head = MLP(\n embed_dim, hidden_dim, 1, num_layers, activation\n )\n\n @classmethod\n def from_config(cls, cfg, contact_decoder):\n args = {}\n args['mask_dim'] = contact_decoder.mask_dim\n args['use_embed'] = cfg.use_embed\n args['embed_dim'] = contact_decoder.embed_dim\n args['max_num_pred'] = cfg.max_num_pred\n args['num_params'] = cfg.num_params\n args['hidden_dim'] = cfg.hidden_dim\n args['num_layers'] = cfg.num_layers\n args['activation'] = cfg.activation\n args['offset_bins'] = cfg.offset_bins\n return cls(**args)\n\n def forward(\n self, xyz, mask_feats, confidence, mask_thresh, embedding, gt_masks=None\n ):\n mask_feats = mask_feats.moveaxis(1, -1) # [B, H, W, mask_dim]\n contacts, conf_all, inputs, num_grasps = [], [], [], []\n total_grasps, num_objs = 0, 0\n for i, (pts, feat, emb, conf) in enumerate(\n zip(xyz, mask_feats, embedding, confidence)\n ):\n mask = conf > mask_thresh\n if gt_masks is not None:\n mask = mask | (gt_masks[i] > 0)\n conf_list, num = [], []\n for e, m, conf in zip(emb, mask, conf):\n f, p, c = feat[m], pts[m], conf[m]\n if self.max_num_pred is not None:\n perm = torch.randperm(f.shape[0])[:self.max_num_pred]\n perm = perm.to(f.device)\n f, p, c = f[perm], p[perm], c[perm]\n if self.use_embed:\n f = torch.cat([\n f, repeat_new_axis(e, f.shape[0], dim=0)\n ], dim=-1)\n contacts.append(p)\n inputs.append(f)\n conf_list.append(c)\n num.append(f.shape[0])\n total_grasps += f.shape[0]\n conf_all.append(conf_list)\n num_grasps.append(num)\n num_objs += conf.shape[0]\n if total_grasps > 0:\n contacts = torch.cat(contacts)\n inputs = torch.cat(inputs)\n else:\n contacts = torch.zeros(0, 3).to(xyz.device)\n inputs = torch.zeros(0, self.feat_dim).to(xyz.device)\n\n if gt_masks is not None:\n gt_inputs, total_gt_grasps = [], 0\n for feat, emb, mask in zip(mask_feats, embedding, gt_masks):\n for e, m in zip(emb, mask):\n f = feat[m > 0]\n if self.use_embed:\n f = torch.cat([\n f, repeat_new_axis(e, f.shape[0], 0)\n ], dim=-1)\n gt_inputs.append(f)\n total_gt_grasps += f.shape[0]\n if total_gt_grasps > 0:\n gt_inputs = torch.cat(gt_inputs)\n else:\n gt_inputs = torch.zeros(0, self.feat_dim).to(xyz.device)\n inputs = torch.cat([inputs, gt_inputs])\n\n contact_dirs = F.normalize(self.contact_dir_head(inputs), dim=-1)\n approach_dirs = self.approach_dir_head(inputs)\n approach_dirs = F.normalize(\n approach_dirs - contact_dirs * (\n approach_dirs * contact_dirs\n ).sum(dim=-1, keepdim=True), dim=-1\n )\n offset_logits = self.offset_head(inputs)\n offsets_one_hot = F.one_hot(\n 
offset_logits.argmax(dim=-1), self.offset_vals.shape[0]\n )\n offsets = (\n offsets_one_hot.float() @ self.offset_vals.to(inputs.device)\n ).squeeze(-1)\n\n outputs = {}\n if gt_masks is not None:\n contact_dirs, outputs['contact_dirs'] = contact_dirs.split(\n [total_grasps, total_gt_grasps], dim=0\n )\n approach_dirs, outputs['approach_dirs'] = approach_dirs.split(\n [total_grasps, total_gt_grasps], dim=0\n )\n offsets = offsets[:total_grasps]\n outputs['offsets'] = offset_logits[total_grasps:]\n \n grasps = build_6d_grasp(contacts, contact_dirs, approach_dirs, offsets)\n grasps = double_split(grasps, num_grasps)\n contacts = double_split(contacts, num_grasps)\n outputs.update({\n 'grasps': grasps,\n 'grasp_confidence': conf_all,\n 'grasp_contacts': contacts,\n 'num_pred_grasps': torch.tensor(\n total_grasps / max(num_objs, 1), device=inputs.device\n )\n })\n if gt_masks is not None:\n outputs['num_gt_grasps'] = torch.tensor(\n total_gt_grasps / max(num_objs, 1), device=inputs.device\n )\n\n if self.param_head is not None:\n outputs['params'] = self.param_head(embedding)\n outputs['release'] = self.release_head(embedding).squeeze(-1)\n return outputs" }, { "identifier": "infer_placements", "path": "m2t2/action_decoder.py", "snippet": "def infer_placements(\n xyz, logits, bottom_center, ee_poses, cam_poses, conf_thresh, height\n):\n rot_prompts = torch.stack([torch.from_numpy(\n tra.euler_matrix(0, 0, 2 * np.pi / logits.shape[1] * i)\n )[:3, :3].float() for i in range(logits.shape[1])]).to(xyz.device)\n rot_prompts = repeat_new_axis(rot_prompts, xyz.shape[1], dim=1)\n\n placements, confidence, contact_points = [], [], []\n for i, (pts, bc, ee_pose, logit) in enumerate(zip(\n xyz, bottom_center, ee_poses, logits\n )):\n conf = logit.sigmoid()\n mask = conf > conf_thresh\n num = list(mask.sum(dim=1))\n rot = rot_prompts[mask]\n offsets = (ee_pose[:3, 3] - bc) @ rot.transpose(1, 2)\n if cam_poses is not None:\n pts = pts @ cam_poses[i, :3, :3].T + cam_poses[i, :3, 3]\n contacts = repeat_new_axis(pts, mask.shape[0], dim=0)[mask]\n place = build_6d_place(contacts, rot, offsets, ee_pose)\n place[:, 2, 3] = place[:, 2, 3] + height\n if cam_poses is not None:\n place = cam_poses[i].inverse() @ place\n placements.append(list(place.split(num)))\n confidence.append(list(conf[mask].split(num)))\n contact_points.append(list(contacts.split(num)))\n outputs = {\n 'placements': placements,\n 'placement_confidence': confidence,\n 'placement_contacts': contact_points\n }\n return outputs" }, { "identifier": "ContactDecoder", "path": "m2t2/contact_decoder.py", "snippet": "class ContactDecoder(nn.Module):\n def __init__(\n self,\n embed_dim: int,\n feedforward_dim: int,\n lang_context_length: int,\n lang_token_dim: int,\n num_grasp_queries: int,\n num_place_queries: int,\n scene_in_features: List[str],\n scene_in_channels: List[int],\n mask_feature: str,\n mask_dim: int,\n place_feature: str,\n place_dim: int,\n num_layers: int,\n num_heads: int,\n use_attn_mask: bool,\n use_task_embed: bool,\n activation: str\n ):\n \"\"\"\n Args:\n activation: activation function for the feedforward network\n embed_dim: transformer feature dimension\n feedforward_dim: hidden dimension of the feedforward network\n lang_context_length: sequence length for language context\n lang_token_dim: dimension of language tokens from pretrained network\n num_layers: number of transformer decoder layers\n num_heads: number of attention heads\n use_attn_mask: mask attention with downsampled instance mask\n predicted by the previous 
layer\n \"\"\"\n super(ContactDecoder, self).__init__()\n\n self.num_grasp_queries = num_grasp_queries\n self.num_place_queries = num_place_queries\n # learnable grasp query features\n self.query_embed = nn.Embedding(\n num_grasp_queries + num_place_queries, embed_dim\n )\n # learnable query p.e.\n self.query_pos_enc = nn.Embedding(\n num_grasp_queries + num_place_queries + lang_context_length, embed_dim\n )\n self.lang_context_length = lang_context_length\n\n self.place_feature = place_feature\n if place_dim != embed_dim and num_place_queries > 0:\n self.place_embed_proj = nn.Linear(place_dim, embed_dim)\n else:\n self.place_embed_proj = nn.Identity()\n if lang_token_dim != embed_dim:\n self.lang_token_proj = nn.Linear(lang_token_dim, embed_dim)\n else:\n self.lang_token_proj = nn.Identity()\n\n self.scene_in_features = scene_in_features\n self.num_scales = len(scene_in_features)\n # context scale embedding\n self.scale_embed = nn.Embedding(self.num_scales, embed_dim)\n # scene feature projection\n self.scene_feature_proj = nn.ModuleList([\n nn.Conv2d(channel, embed_dim, kernel_size=1)\n if channel != embed_dim else nn.Identity()\n for channel in scene_in_channels\n ])\n # context positional encoding\n self.pe_layer = PositionEncoding3D(embed_dim)\n\n # transformer decoder\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n self.num_layers = num_layers\n self.cross_attention_layers = nn.ModuleList()\n self.self_attention_layers = nn.ModuleList()\n self.ffn_layers = nn.ModuleList()\n for _ in range(num_layers):\n self.cross_attention_layers.append(\n AttentionLayer(embed_dim, num_heads)\n )\n self.self_attention_layers.append(\n AttentionLayer(embed_dim, num_heads)\n )\n self.ffn_layers.append(\n FFNLayer(embed_dim, feedforward_dim, activation)\n )\n self.use_attn_mask = use_attn_mask\n\n # prediction MLPs\n self.mask_feature = mask_feature\n self.mask_dim = mask_dim\n self.norm = nn.LayerNorm(embed_dim)\n num_tasks = 0\n if num_grasp_queries > 0:\n if num_grasp_queries > 1:\n self.object_head = nn.Linear(embed_dim, 1)\n self.grasp_mask_head = MLP(\n embed_dim, embed_dim, mask_dim,\n num_layers=3, activation=activation\n )\n num_tasks += 1\n if num_place_queries > 0:\n self.place_mask_head = MLP(\n embed_dim, embed_dim, mask_dim,\n num_layers=3, activation=activation\n )\n num_tasks += 1\n self.use_task_embed = use_task_embed\n if use_task_embed:\n # learnable task embedding\n self.task_embed = nn.Embedding(num_tasks, embed_dim)\n\n @classmethod\n def from_config(cls, cfg, scene_channels, obj_channels):\n args = {}\n args[\"mask_feature\"] = cfg.mask_feature\n args[\"embed_dim\"] = cfg.embed_dim\n args[\"feedforward_dim\"] = cfg.feedforward_dim\n args[\"lang_context_length\"] = cfg.language_context_length\n args[\"lang_token_dim\"] = cfg.language_token_dim\n args[\"scene_in_features\"] = cfg.in_features[::-1]\n args[\"scene_in_channels\"] = [\n scene_channels[f] for f in cfg.in_features[::-1]\n ]\n args[\"num_grasp_queries\"] = cfg.num_grasp_queries\n args[\"num_place_queries\"] = cfg.num_place_queries\n args[\"mask_dim\"] = scene_channels[cfg.mask_feature]\n args[\"place_feature\"] = cfg.place_feature\n args[\"place_dim\"] = obj_channels[cfg.place_feature]\n args[\"num_layers\"] = cfg.num_layers\n args[\"num_heads\"] = cfg.num_heads\n args[\"use_attn_mask\"] = cfg.use_attn_mask\n args[\"use_task_embed\"] = cfg.use_task_embed\n args[\"activation\"] = cfg.activation\n return cls(**args)\n\n def predict(self, embed, mask_features):\n grasp_embed, place_embed = embed.split(\n 
[self.num_grasp_queries, self.num_place_queries]\n )\n pred, embed, attn_mask = {}, {}, []\n if grasp_embed.shape[0] > 0:\n embed['grasp'] = grasp_embed.transpose(0, 1)\n if self.num_grasp_queries > 1:\n pred['objectness'] = self.object_head(\n embed['grasp']\n ).squeeze(-1)\n emb = self.grasp_mask_head(embed['grasp'])\n pred['grasping_masks'] = torch.einsum(\n \"bqc,bcn->bqn\", emb, mask_features\n )\n attn_mask.append(pred['grasping_masks'])\n if place_embed.shape[0] > 0:\n embed['place'] = place_embed.transpose(0, 1)\n emb = self.place_mask_head(embed['place'])\n pred['placement_masks'] = torch.einsum(\n \"bqc,bcn->bqn\", emb, mask_features\n )\n attn_mask.append(pred['placement_masks'])\n attn_mask = torch.cat(attn_mask, dim=1).detach()\n return pred, embed, attn_mask\n\n def construct_context(self, features, feature_keys, feature_proj):\n context = [features['features'][f] for f in feature_keys]\n pos_encs, context_sizes = [], []\n for i, f in enumerate(feature_keys):\n pos_enc = self.pe_layer(features['context_pos'][f])\n context_sizes.append(context[i].shape[-1])\n pos_enc = pos_enc.flatten(start_dim=2).permute(2, 0, 1)\n pos_encs.append(pos_enc)\n context[i] = feature_proj[i](context[i].unsqueeze(-1)).squeeze(-1)\n context[i] = context[i] + self.scale_embed.weight[i].unsqueeze(1)\n # NxCxHW -> HWxNxC\n context[i] = context[i].permute(2, 0, 1)\n return context, pos_encs, context_sizes\n\n def forward(self, scene_features, obj_features, lang_tokens=None):\n \"\"\"\n Args:\n scene_features: a dict containing multi-scale feature maps \n from scene point cloud\n obj_features: a dict containing multi-scale feature maps\n from point cloud of object to be placed\n \"\"\"\n context, pos_encs, context_sizes = self.construct_context(\n scene_features, self.scene_in_features, self.scene_feature_proj\n )\n mask_feat = scene_features['features'][self.mask_feature]\n\n grasp_embed, place_embed = self.query_embed.weight.split(\n [self.num_grasp_queries, self.num_place_queries]\n )\n embed, task_id = [], 0\n if grasp_embed.shape[0] > 0:\n if self.use_task_embed:\n grasp_embed = grasp_embed + self.task_embed.weight[task_id]\n embed.append(repeat_new_axis(\n grasp_embed, mask_feat.shape[0], dim=1\n ))\n task_id += 1\n if place_embed.shape[0] > 0:\n place_prompts = obj_features['features'][self.place_feature]\n place_prompts = place_prompts.max(dim=-1)[0]\n place_prompts = self.place_embed_proj(place_prompts)\n if self.use_task_embed:\n place_embed = place_embed + self.task_embed.weight[task_id]\n embed.append(\n place_embed.unsqueeze(1) + place_prompts.unsqueeze(0)\n )\n if lang_tokens is not None:\n embed.append(self.lang_token_proj(lang_tokens).transpose(0, 1))\n embed = torch.cat(embed)\n query_pos_enc = repeat_new_axis(\n self.query_pos_enc.weight, mask_feat.shape[0], dim=1\n )\n\n # initial prediction with learnable query features only (no context)\n embed = self.norm(embed)\n prediction, _, attn_mask = self.predict(\n embed[:embed.shape[0] - self.lang_context_length], mask_feat\n )\n predictions = [prediction]\n\n for i in range(self.num_layers):\n j = i % self.num_scales\n if self.use_attn_mask:\n attn_mask = compute_attention_mask(\n attn_mask, scene_features['sample_ids'],\n context_sizes[j], self.num_heads\n )\n if lang_tokens is not None:\n attn_mask = torch.cat([\n attn_mask, repeat_new_axis(\n torch.zeros_like(attn_mask[:, 0]),\n lang_tokens.shape[1], dim=1\n )\n ], dim=1)\n else:\n attn_mask = None\n context_feat = context[j]\n key_pos_enc = pos_encs[j]\n embed = 
self.cross_attention_layers[i](\n embed, context_feat, context_feat + key_pos_enc,\n query_pos_enc, key_pos_enc, attn_mask\n )\n embed = self.self_attention_layers[i](\n embed, embed, embed + query_pos_enc,\n query_pos_enc, query_pos_enc\n )\n embed = self.ffn_layers[i](embed)\n\n prediction, embedding, attn_mask = self.predict(\n embed[:embed.shape[0] - self.lang_context_length], mask_feat\n )\n predictions.append(prediction)\n return embedding, predictions" }, { "identifier": "SetCriterion", "path": "m2t2/criterion.py", "snippet": "class SetCriterion(nn.Module):\n \"\"\"This class computes the Hungarian matching loss.\n The process consists of two steps:\n 1) compute 1-1 assignments between outputs of the model and ground\n truth targets (usually, there are more outputs than targets)\n 2) supervise each matched prediction with the corresponding target\n \"\"\"\n\n def __init__(\n self, matcher, deep_supervision, recompute_indices, mask_criterion,\n object_weight, not_object_weight, pseudo_ce_weight\n ):\n \"\"\"Create the criterion.\n Parameters:\n matcher: module to compute 1-1 matching between targets and outputs\n sampler: sample a subset of points to compute mask loss\n deep_supervision: whether to supervise intermediate layer outputs\n recompute_indices: recompute matching for each intermediate layer\n object_weight: weight of the objectness classification loss\n not_object_weight: multiplier for the ce loss of unmatched outputs\n instance_weights: weights of the instance mask loss\n contact_weights: weights of the contact mask loss\n pseudo_ce: use cross entropy with pseudo labels from matcher\n \"\"\"\n super(SetCriterion, self).__init__()\n self.matcher = matcher\n self.deep_supervision = deep_supervision\n self.recompute_indices = recompute_indices\n\n self.object_weight = object_weight\n self.not_object_weight = not_object_weight\n self.mask_criterion = mask_criterion\n self.pseudo_ce_weight = pseudo_ce_weight\n if pseudo_ce_weight > 0:\n self.pseudo_ce_loss = nn.CrossEntropyLoss()\n\n @classmethod\n def from_config(cls, cfg, matcher):\n args = {}\n args['deep_supervision'] = cfg.deep_supervision\n args['recompute_indices'] = cfg.recompute_indices\n args['object_weight'] = cfg.object_weight\n args['not_object_weight'] = cfg.not_object_weight\n args['mask_criterion'] = MaskCriterion.from_config(cfg)\n args['pseudo_ce_weight'] = cfg.pseudo_ce_weight\n return cls(matcher, **args)\n\n def get_pseudo_ce_loss(self, pred_masks, gt_masks, matched_idx):\n B, N, H, W = pred_masks.shape\n pseudo_label = torch.zeros(B, H, W).long()\n pseudo_label = pseudo_label.to(pred_masks.device)\n tgt_mask_any = []\n for i, (tgt_mask, idx) in enumerate(zip(gt_masks, matched_idx)):\n obj_id, y, x = torch.where(tgt_mask > 0)\n pseudo_label[i, y, x] = idx[obj_id]\n tgt_mask_any.append(tgt_mask.any(dim=0))\n tgt_mask_any = torch.stack(tgt_mask_any)\n loss = self.pseudo_ce_loss(\n pred_masks.permute(0, 2, 3, 1)[tgt_mask_any],\n pseudo_label[tgt_mask_any]\n )\n return loss\n\n def get_loss(self, pred, data, matched_idx, layer=None):\n obj_label = torch.zeros_like(pred['objectness'])\n for i, idx in enumerate(matched_idx):\n obj_label[i][idx] = 1\n pos_weight = torch.tensor(1 / self.not_object_weight).to(\n pred['objectness'].device\n )\n loss_obj = bce_loss(\n pred['objectness'], obj_label,\n pos_weight=pos_weight, reduction='none'\n ) * self.not_object_weight\n mask = data['task_is_pick'].unsqueeze(1).float()\n loss_obj = (loss_obj * mask).sum() / torch.clamp(mask.sum(), 1)\n losses = {'objectness': 
(self.object_weight, loss_obj)}\n\n if self.pseudo_ce_weight > 0:\n pseudo_ce = self.get_pseudo_ce_loss(\n pred['grasping_masks'], data['grasping_masks'], matched_idx\n )\n losses['pseudo_ce'] = (self.pseudo_ce_weight, pseudo_ce)\n\n matched_masks = [mask[idx] for mask, idx in zip(\n pred['grasping_masks'], matched_idx\n )]\n outputs = {'matched_grasping_masks': matched_masks}\n mask_loss, stats = self.mask_criterion(\n 'grasping', torch.cat(matched_masks),\n torch.cat(data['grasping_masks'])\n )\n losses.update(mask_loss)\n outputs.update(stats)\n\n if layer is not None:\n losses = {\n f'layer{layer}/{key}': val for key, val in losses.items()\n }\n return losses, outputs\n\n def forward(self, pred, targets):\n outputs = pred[-1]\n\n # Compute matching between final prediction and the targets\n output_idx, cost_matrices = self.matcher(pred[-1], targets)\n outputs.update({\n 'matched_idx': output_idx, 'cost_matrices': cost_matrices\n })\n\n # Compute losses for the final layer outputs\n losses, stats = self.get_loss(pred[-1], targets, output_idx)\n outputs.update(stats)\n\n if self.deep_supervision and self.training:\n # Compute losses for each intermediate layer outputs\n for i, p in enumerate(pred[:-1]):\n if self.recompute_indices:\n output_idx, _ = self.matcher(p, targets)\n l_dict, _ = self.get_loss(p, targets, output_idx, i + 1)\n losses.update(l_dict)\n outputs[f'layer{i+1}/matched_idx'] = output_idx\n\n return losses, outputs" }, { "identifier": "GraspCriterion", "path": "m2t2/criterion.py", "snippet": "class GraspCriterion(nn.Module):\n def __init__(\n self, adds_criterion, contact_dir_weight, approach_dir_weight,\n offset_weight, param_weight, bin_weights\n ):\n super(GraspCriterion, self).__init__()\n self.adds_criterion = adds_criterion\n self.loss_weights = {\n 'contact_dir': contact_dir_weight,\n 'approach_dir': approach_dir_weight,\n 'offset': offset_weight,\n 'param': param_weight,\n 'release': param_weight\n }\n self.bin_weights = torch.tensor(bin_weights)\n\n @classmethod\n def from_config(cls, cfg):\n args = {}\n args['adds_criterion'] = ADDSCriterion(\n cfg.adds_pred2gt, cfg.adds_gt2pred, cfg.adds_per_obj\n )\n args['contact_dir_weight'] = cfg.contact_dir\n args['approach_dir_weight'] = cfg.approach_dir\n args['offset_weight'] = cfg.offset\n args['param_weight'] = cfg.param\n args['bin_weights'] = cfg.offset_bin_weights\n return cls(**args)\n\n def forward(self, pred, data):\n losses = {}\n losses['contact_dir'] = (1 - (\n pred['contact_dirs'] * data['contact_dirs']\n ).sum(dim=1))\n losses['approach_dir'] = (1 - (\n pred['approach_dirs'] * data['approach_dirs']\n ).sum(dim=1))\n losses['offset'] = cross_entropy(\n pred['offsets'], data['offsets'],\n self.bin_weights.to(pred['offsets'].device), reduction='none'\n )\n if 'params' in data:\n losses['param'] = ((pred['params'] - data['params']) ** 2).mean()\n if 'release' in data:\n losses['release'] = bce_loss(\n pred['release'], data['release'].float()\n ).mean()\n for key in ['contact_dir', 'approach_dir', 'offset']:\n losses[key] = losses[key].sum() / max(losses[key].numel(), 1)\n losses = {\n key: (self.loss_weights[key], losses[key]) for key in losses\n }\n losses.update(self.adds_criterion(\n pred['grasps'], pred['grasp_confidence'],\n data['grasps'], data['inputs'].device\n ))\n return losses" }, { "identifier": "PlaceCriterion", "path": "m2t2/criterion.py", "snippet": "class PlaceCriterion(nn.Module):\n def __init__(self, mask_criterion, deep_supervision):\n super(PlaceCriterion, self).__init__()\n 
self.mask_criterion = mask_criterion\n self.deep_supervision = deep_supervision\n\n @classmethod\n def from_config(cls, cfg):\n args = {}\n args['mask_criterion'] = MaskCriterion.from_config(cfg)\n args['deep_supervision'] = cfg.deep_supervision\n return cls(**args)\n\n def forward(self, pred, data):\n pred_masks = pred[-1]['placement_masks'][data['task_is_place']]\n target_masks = data['placement_masks'][data['task_is_place']]\n loss_masks = data['placement_region'][data['task_is_place']]\n loss_masks = repeat_new_axis(\n loss_masks, target_masks.shape[1], dim=1\n ) # (B, H, W) -> (B, Q, H, W)\n loss_masks = loss_masks.flatten(0, 1)\n target_masks = target_masks.flatten(0, 1)\n pred_masks = pred_masks.flatten(0, 1)\n losses, stats = self.mask_criterion(\n 'placement', pred_masks, target_masks, loss_masks\n )\n\n if self.deep_supervision and self.training:\n # Compute losses for each intermediate layer outputs\n for i, p in enumerate(pred[:-1]):\n pred_masks = p['placement_masks'][data['task_is_place']]\n pred_masks = pred_masks.flatten(0, 1)\n mask_losses, _ = self.mask_criterion(\n 'placement', pred_masks, target_masks, loss_masks\n )\n mask_losses = {\n f'layer{i+1}/{key}': val\n for key, val in mask_losses.items()\n }\n losses.update(mask_losses)\n return losses, stats" }, { "identifier": "HungarianMatcher", "path": "m2t2/matcher.py", "snippet": "class HungarianMatcher(torch.nn.Module):\n \"\"\"This class computes a 1-to-1 assignment between the targets and the\n network's predictions. The targets only include objects, so in general,\n there are more predictions than targets. The un-matched predictions are\n treated as non-objects).\n \"\"\"\n def __init__(self, object_weight, bce_weight, dice_weight):\n super(HungarianMatcher, self).__init__()\n self.object_weight = object_weight\n self.bce_weight = bce_weight\n self.dice_weight = dice_weight\n\n @classmethod\n def from_config(cls, cfg):\n args = {}\n args['object_weight'] = cfg.object_weight\n args['bce_weight'] = cfg.bce_weight\n args['dice_weight'] = cfg.dice_weight\n return cls(**args)\n\n @torch.no_grad()\n def forward(self, outputs, data):\n \"\"\"Performs the matching\n Params:\n outputs: a dict that contains these entries:\n \"objectness\": dim [batch_size, num_queries]\n logits for the objectness score\n \"instance_masks\": dim [batch_size, num_queries, ...]\n predicted object instance masks\n \"contact_masks\": dim [batch_size, num_queries, ...]\n predicted grasp contact masks\n targets: a dict that contains these entries:\n \"instance_masks\": a list of batch_size tensors\n ground truth object instance masks\n \"contact_masks\": a list of batch_size tensors\n ground truth grasp contact masks\n Returns:\n indices: a list of length batch_size, containing indices of the\n predictions that match the best with each target\n \"\"\"\n indices, cost_matrices = [], []\n for i in range(len(outputs['objectness'])):\n # We approximate objectness NLL loss with 1 - prob.\n # The 1 is a constant that can be ommitted.\n cost = self.object_weight * (\n -outputs['objectness'][i:i+1].T.sigmoid()\n ) + self.bce_weight * bce_loss_matrix(\n outputs['grasping_masks'][i], data['grasping_masks'][i]\n ) + self.dice_weight * dice_loss_matrix(\n outputs['grasping_masks'][i], data['grasping_masks'][i]\n )\n output_idx, target_idx = linear_sum_assignment(cost.cpu().numpy())\n output_idx = output_idx[np.argsort(target_idx)]\n indices.append(torch.from_numpy(output_idx).long().to(cost.device))\n cost_matrices.append(cost)\n return indices, cost_matrices" 
}, { "identifier": "PointNet2MSG", "path": "m2t2/pointnet2.py", "snippet": "class PointNet2MSG(PointNet2Base):\n def __init__(\n self, num_points, downsample, radius,\n radius_mult, use_rgb=True, norm='BN'\n ):\n super(PointNet2MSG, self).__init__()\n\n self.use_rgb = use_rgb\n c_in = 3 if use_rgb else 0\n num_points = num_points // downsample\n self.SA_modules.append(\n PointnetSAModuleMSG(\n npoint=num_points,\n radii=[radius, radius * radius_mult],\n nsamples=[16, 32],\n mlps=[[c_in, 32, 32, 64], [c_in, 32, 32, 64]],\n norm=norm\n )\n )\n c_out_0 = 64 + 64\n radius = radius * radius_mult\n\n num_points = num_points // downsample\n self.SA_modules.append(\n PointnetSAModuleMSG(\n npoint=num_points,\n radii=[radius, radius * radius_mult],\n nsamples=[16, 32],\n mlps=[[c_out_0, 64, 64, 128], [c_out_0, 64, 64, 128]],\n norm=norm\n )\n )\n c_out_1 = 128 + 128\n radius = radius * radius_mult\n\n num_points = num_points // downsample\n self.SA_modules.append(\n PointnetSAModuleMSG(\n npoint=num_points,\n radii=[radius, radius * radius_mult],\n nsamples=[16, 32],\n mlps=[[c_out_1, 128, 128, 256], [c_out_1, 128, 128, 256]],\n norm=norm\n )\n )\n c_out_2 = 256 + 256\n radius = radius * radius_mult\n\n num_points = num_points // downsample\n self.SA_modules.append(\n PointnetSAModuleMSG(\n npoint=num_points,\n radii=[radius, radius * radius_mult],\n nsamples=[16, 32],\n mlps=[[c_out_2, 256, 256, 512], [c_out_2, 256, 256, 512]],\n norm=norm\n )\n )\n c_out_3 = 512 + 512\n\n self.FP_modules.append(\n PointnetFPModule(mlp=[256 + c_in, 128, 128])\n )\n self.FP_modules.append(\n PointnetFPModule(mlp=[512 + c_out_0, 256, 256])\n )\n self.FP_modules.append(\n PointnetFPModule(mlp=[512 + c_out_1, 512, 512])\n )\n self.FP_modules.append(\n PointnetFPModule(mlp=[c_out_3 + c_out_2, 512, 512])\n )\n\n self.out_channels = {\n 'res0': 128, 'res1': 256, 'res2': 512, 'res3': 512, 'res4': 1024\n }\n\n @classmethod\n def from_config(cls, cfg):\n args = {}\n args['num_points'] = cfg.num_points\n args['downsample'] = cfg.downsample\n args['radius'] = cfg.radius\n args['radius_mult'] = cfg.radius_mult\n args['use_rgb'] = cfg.use_rgb\n return cls(**args)" }, { "identifier": "PointNet2MSGCls", "path": "m2t2/pointnet2.py", "snippet": "class PointNet2MSGCls(PointNet2Base):\n def __init__(\n self, num_points, downsample, radius,\n radius_mult, use_rgb=True, norm='BN'\n ):\n super(PointNet2MSGCls, self).__init__()\n\n self.use_rgb = use_rgb\n c_in = 3 if use_rgb else 0\n num_points = num_points // downsample\n self.SA_modules.append(\n PointnetSAModuleMSG(\n npoint=num_points,\n radii=[radius, radius * radius_mult],\n nsamples=[16, 32],\n mlps=[[c_in, 32, 32, 64], [c_in, 32, 32, 64]],\n norm=norm\n )\n )\n c_out_0 = 64 + 64\n radius = radius * radius_mult\n\n num_points = num_points // downsample\n self.SA_modules.append(\n PointnetSAModuleMSG(\n npoint=num_points,\n radii=[radius, radius * radius_mult],\n nsamples=[16, 32],\n mlps=[[c_out_0, 64, 64, 128], [c_out_0, 64, 64, 128]],\n norm=norm\n )\n )\n c_out_1 = 128 + 128\n radius = radius * radius_mult\n\n num_points = num_points // downsample\n self.SA_modules.append(\n PointnetSAModuleMSG(\n npoint=num_points,\n radii=[radius, radius * radius_mult],\n nsamples=[16, 32],\n mlps=[[c_out_1, 128, 128, 256], [c_out_1, 128, 128, 256]],\n norm=norm\n )\n )\n c_out_2 = 256 + 256\n self.SA_modules.append(\n PointnetSAModule(mlp=[c_out_2, 256, 256, 512], norm=norm)\n )\n\n self.out_channels = {\n 'res0': c_in, 'res1': 128, 'res2': 256, 'res3': 512, 'res4': 512\n }\n\n 
@classmethod\n def from_config(cls, cfg):\n args = {}\n args['num_points'] = cfg.num_points\n args['downsample'] = cfg.downsample\n args['radius'] = cfg.radius\n args['radius_mult'] = cfg.radius_mult\n args['use_rgb'] = cfg.use_rgb\n return cls(**args)" } ]
import torch
import torch.nn as nn
from m2t2.action_decoder import ActionDecoder, infer_placements
from m2t2.contact_decoder import ContactDecoder
from m2t2.criterion import SetCriterion, GraspCriterion, PlaceCriterion
from m2t2.matcher import HungarianMatcher
from m2t2.pointnet2 import PointNet2MSG, PointNet2MSGCls
9,379
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Author: Wentao Yuan
'''
Top-level M2T2 network.
'''
class M2T2(nn.Module):
    def __init__(
        self,
        backbone: nn.Module,
        transformer: nn.Module,
        object_encoder: nn.Module = None,
        grasp_mlp: nn.Module = None,
        set_criterion: nn.Module = None,
        grasp_criterion: nn.Module = None,
        place_criterion: nn.Module = None
    ):
        super(M2T2, self).__init__()
        self.backbone = backbone
        self.object_encoder = object_encoder
        self.transformer = transformer
        self.grasp_mlp = grasp_mlp
        self.set_criterion = set_criterion
        self.grasp_criterion = grasp_criterion
        self.place_criterion = place_criterion

    @classmethod
    def from_config(cls, cfg):
        args = {}
        args['backbone'] = PointNet2MSG.from_config(cfg.scene_encoder)
        channels = args['backbone'].out_channels
        obj_channels = None
        if cfg.contact_decoder.num_place_queries > 0:
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Author: Wentao Yuan
'''
Top-level M2T2 network.
'''
class M2T2(nn.Module):
    def __init__(
        self,
        backbone: nn.Module,
        transformer: nn.Module,
        object_encoder: nn.Module = None,
        grasp_mlp: nn.Module = None,
        set_criterion: nn.Module = None,
        grasp_criterion: nn.Module = None,
        place_criterion: nn.Module = None
    ):
        super(M2T2, self).__init__()
        self.backbone = backbone
        self.object_encoder = object_encoder
        self.transformer = transformer
        self.grasp_mlp = grasp_mlp
        self.set_criterion = set_criterion
        self.grasp_criterion = grasp_criterion
        self.place_criterion = place_criterion

    @classmethod
    def from_config(cls, cfg):
        args = {}
        args['backbone'] = PointNet2MSG.from_config(cfg.scene_encoder)
        channels = args['backbone'].out_channels
        obj_channels = None
        if cfg.contact_decoder.num_place_queries > 0:
args['object_encoder'] = PointNet2MSGCls.from_config(
8
2023-11-03 22:32:05+00:00
12k
Codra-Ingenierie-Informatique/DataLab
cdl/core/computation/signal.py
[ { "identifier": "fit", "path": "cdl/algorithms/fit.py", "snippet": "class FitModel(abc.ABC):\nclass GaussianModel(FitModel):\nclass LorentzianModel(FitModel):\nclass VoigtModel(FitModel):\n def func(cls, x, amp, sigma, x0, y0):\n def get_amp_from_amplitude(\n cls, amplitude, sigma\n ): # pylint: disable=unused-argument\n def amplitude(cls, amp, sigma):\n def fwhm(cls, amp, sigma):\n def half_max_segment(cls, amp, sigma, x0, y0):\n def func(cls, x, amp, sigma, x0, y0):\n def get_amp_from_amplitude(cls, amplitude, sigma):\n def amplitude(cls, amp, sigma):\n def fwhm(cls, amp, sigma):\n def func(cls, x, amp, sigma, x0, y0):\n def get_amp_from_amplitude(cls, amplitude, sigma):\n def amplitude(cls, amp, sigma):\n def fwhm(cls, amp, sigma):\n def func(cls, x, amp, sigma, x0, y0):\n def fwhm(cls, amp, sigma):" }, { "identifier": "derivative", "path": "cdl/algorithms/signal.py", "snippet": "def derivative(x: np.ndarray, y: np.ndarray) -> np.ndarray:\n \"\"\"Compute numerical derivative.\n\n Args:\n x (numpy.ndarray): X data\n y (numpy.ndarray): Y data\n\n Returns:\n np.ndarray: Numerical derivative\n \"\"\"\n dy = np.zeros_like(y)\n dy[0:-1] = np.diff(y) / np.diff(x)\n dy[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])\n return dy" }, { "identifier": "interpolate", "path": "cdl/algorithms/signal.py", "snippet": "def interpolate(\n x: np.ndarray,\n y: np.ndarray,\n xnew: np.ndarray,\n method: str,\n fill_value: float | None = None,\n) -> np.ndarray:\n \"\"\"Interpolate data.\n\n Args:\n x (numpy.ndarray): X data\n y (numpy.ndarray): Y data\n xnew (numpy.ndarray): New X data\n method (str): Interpolation method. Valid values are 'linear', 'spline',\n 'quadratic', 'cubic', 'barycentric', 'pchip'\n fill_value (float | None): Fill value. Defaults to None.\n This value is used to fill in for requested points outside of the\n X data range. 
It is only used if the method argument is 'linear',\n 'cubic' or 'pchip'.\n \"\"\"\n interpolator_extrap = None\n if method == \"linear\":\n # Linear interpolation using NumPy's interp function:\n ynew = np.interp(xnew, x, y, left=fill_value, right=fill_value)\n elif method == \"spline\":\n # Spline using 1-D interpolation with SciPy's interpolate package:\n knots, coeffs, degree = scipy.interpolate.splrep(x, y, s=0)\n ynew = scipy.interpolate.splev(xnew, (knots, coeffs, degree), der=0)\n elif method == \"quadratic\":\n # Quadratic interpolation using NumPy's polyval function:\n coeffs = np.polyfit(x, y, 2)\n ynew = np.polyval(coeffs, xnew)\n elif method == \"cubic\":\n # Cubic interpolation using SciPy's Akima1DInterpolator class:\n interpolator_extrap = scipy.interpolate.Akima1DInterpolator(x, y)\n elif method == \"barycentric\":\n # Barycentric interpolation using SciPy's BarycentricInterpolator class:\n interpolator = scipy.interpolate.BarycentricInterpolator(x, y)\n ynew = interpolator(xnew)\n elif method == \"pchip\":\n # PCHIP interpolation using SciPy's PchipInterpolator class:\n interpolator_extrap = scipy.interpolate.PchipInterpolator(x, y)\n else:\n raise ValueError(f\"Invalid interpolation method {method}\")\n if interpolator_extrap is not None:\n ynew = interpolator_extrap(xnew, extrapolate=fill_value is None)\n if fill_value is not None:\n ynew[xnew < x[0]] = fill_value\n ynew[xnew > x[-1]] = fill_value\n return ynew" }, { "identifier": "moving_average", "path": "cdl/algorithms/signal.py", "snippet": "def moving_average(y: np.ndarray, n: int) -> np.ndarray:\n \"\"\"Compute moving average.\n\n Args:\n y (numpy.ndarray): Input array\n n (int): Window size\n\n Returns:\n np.ndarray: Moving average\n \"\"\"\n y_padded = np.pad(y, (n // 2, n - 1 - n // 2), mode=\"edge\")\n return np.convolve(y_padded, np.ones((n,)) / n, mode=\"valid\")" }, { "identifier": "normalize", "path": "cdl/algorithms/signal.py", "snippet": "def normalize(yin: np.ndarray, parameter: str = \"maximum\") -> np.ndarray:\n \"\"\"Normalize input array to a given parameter.\n\n Args:\n yin (numpy.ndarray): Input array\n parameter (str | None): Normalization parameter. Defaults to \"maximum\".\n Supported values: 'maximum', 'amplitude', 'sum', 'energy'\n\n Returns:\n np.ndarray: Normalized array\n \"\"\"\n axis = len(yin.shape) - 1\n if parameter == \"maximum\":\n maximum = np.max(yin, axis)\n if axis == 1:\n maximum = maximum.reshape((len(maximum), 1))\n maxarray = np.tile(maximum, yin.shape[axis]).reshape(yin.shape)\n return yin / maxarray\n if parameter == \"amplitude\":\n ytemp = np.array(yin, copy=True)\n minimum = np.min(yin, axis)\n if axis == 1:\n minimum = minimum.reshape((len(minimum), 1))\n ytemp -= minimum\n return normalize(ytemp, parameter=\"maximum\")\n if parameter == \"sum\":\n return yin / yin.sum()\n if parameter == \"energy\":\n return yin / (yin * yin.conjugate()).sum()\n raise RuntimeError(f\"Unsupported parameter {parameter}\")" }, { "identifier": "peak_indexes", "path": "cdl/algorithms/signal.py", "snippet": "def peak_indexes(\n y, thres: float = 0.3, min_dist: int = 1, thres_abs: bool = False\n) -> np.ndarray:\n # Copyright (c) 2014 Lucas Hermann Negri\n # Unmodified code snippet from PeakUtils 1.3.0\n \"\"\"Peak detection routine.\n\n Finds the numeric index of the peaks in *y* by taking its first order\n difference. By using *thres* and *min_dist* parameters, it is possible\n to reduce the number of detected peaks. 
*y* must be signed.\n\n Parameters\n ----------\n y : ndarray (signed)\n 1D amplitude data to search for peaks.\n thres : float between [0., 1.]\n Normalized threshold. Only the peaks with amplitude higher than the\n threshold will be detected.\n min_dist : int\n Minimum distance between each detected peak. The peak with the highest\n amplitude is preferred to satisfy this constraint.\n thres_abs: boolean\n If True, the thres value will be interpreted as an absolute value,\n instead of a normalized threshold.\n\n Returns\n -------\n ndarray\n Array containing the numeric indexes of the peaks that were detected\n \"\"\"\n if isinstance(y, np.ndarray) and np.issubdtype(y.dtype, np.unsignedinteger):\n raise ValueError(\"y must be signed\")\n\n if not thres_abs:\n thres = thres * (np.max(y) - np.min(y)) + np.min(y)\n\n # compute first order difference\n dy = np.diff(y)\n\n # propagate left and right values successively to fill all plateau pixels\n # (0-value)\n (zeros,) = np.where(dy == 0)\n\n # check if the signal is totally flat\n if len(zeros) == len(y) - 1:\n return np.array([])\n\n if len(zeros):\n # compute first order difference of zero indexes\n zeros_diff = np.diff(zeros)\n # check when zeros are not chained together\n (zeros_diff_not_one,) = np.add(np.where(zeros_diff != 1), 1)\n # make an array of the chained zero indexes\n zero_plateaus = np.split(zeros, zeros_diff_not_one)\n\n # fix if leftmost value in dy is zero\n if zero_plateaus[0][0] == 0:\n dy[zero_plateaus[0]] = dy[zero_plateaus[0][-1] + 1]\n zero_plateaus.pop(0)\n\n # fix if rightmost value of dy is zero\n if len(zero_plateaus) > 0 and zero_plateaus[-1][-1] == len(dy) - 1:\n dy[zero_plateaus[-1]] = dy[zero_plateaus[-1][0] - 1]\n zero_plateaus.pop(-1)\n\n # for each chain of zero indexes\n for plateau in zero_plateaus:\n median = np.median(plateau)\n # set leftmost values to leftmost non zero values\n dy[plateau[plateau < median]] = dy[plateau[0] - 1]\n # set rightmost and middle values to rightmost non zero values\n dy[plateau[plateau >= median]] = dy[plateau[-1] + 1]\n\n # find the peaks by using the first order difference\n peaks = np.where(\n (np.hstack([dy, 0.0]) < 0.0)\n & (np.hstack([0.0, dy]) > 0.0)\n & (np.greater(y, thres))\n )[0]\n\n # handle multiple peaks, respecting the minimum distance\n if peaks.size > 1 and min_dist > 1:\n highest = peaks[np.argsort(y[peaks])][::-1]\n rem = np.ones(y.size, dtype=bool)\n rem[peaks] = False\n\n for peak in highest:\n if not rem[peak]:\n sl = slice(max(0, peak - min_dist), peak + min_dist + 1)\n rem[sl] = True\n rem[peak] = False\n\n peaks = np.arange(y.size)[~rem]\n\n return peaks" }, { "identifier": "xpeak", "path": "cdl/algorithms/signal.py", "snippet": "def xpeak(x: np.ndarray, y: np.ndarray) -> float:\n \"\"\"Return default peak X-position (assuming a single peak).\n\n Args:\n x (numpy.ndarray): X data\n y (numpy.ndarray): Y data\n\n Returns:\n float: Peak X-position\n \"\"\"\n peaks = peak_indexes(y)\n if peaks.size == 1:\n return x[peaks[0]]\n return np.average(x, weights=y)" }, { "identifier": "xy_fft", "path": "cdl/algorithms/signal.py", "snippet": "def xy_fft(\n x: np.ndarray, y: np.ndarray, shift: bool = True\n) -> tuple[np.ndarray, np.ndarray]:\n \"\"\"Compute FFT on X,Y data.\n\n Args:\n x (numpy.ndarray): X data\n y (numpy.ndarray): Y data\n shift (bool | None): Shift the zero frequency to the center of the spectrum.\n Defaults to True.\n\n Returns:\n tuple[np.ndarray, np.ndarray]: X,Y data\n \"\"\"\n y1 = np.fft.fft(y)\n x1 = np.fft.fftfreq(x.shape[-1], d=x[1] - 
x[0])\n if shift:\n x1 = np.fft.fftshift(x1)\n y1 = np.fft.fftshift(y1)\n return x1, y1" }, { "identifier": "xy_ifft", "path": "cdl/algorithms/signal.py", "snippet": "def xy_ifft(\n x: np.ndarray, y: np.ndarray, shift: bool = True\n) -> tuple[np.ndarray, np.ndarray]:\n \"\"\"Compute iFFT on X,Y data.\n\n Args:\n x (numpy.ndarray): X data\n y (numpy.ndarray): Y data\n shift (bool | None): Shift the zero frequency to the center of the spectrum.\n Defaults to True.\n\n Returns:\n tuple[np.ndarray, np.ndarray]: X,Y data\n \"\"\"\n x1 = np.fft.fftfreq(x.shape[-1], d=x[1] - x[0])\n if shift:\n x1 = np.fft.ifftshift(x1)\n y = np.fft.ifftshift(y)\n y1 = np.fft.ifft(y)\n return x1, y1.real" }, { "identifier": "_", "path": "cdl/config.py", "snippet": "CONF_VERSION = \"1.0.0\"\nAPP_NAME = \"DataLab\"\nMOD_NAME = \"cdl\"\nAPP_DESC = _(\"\"\"DataLab is a generic signal and image processing platform\"\"\")\nAPP_PATH = osp.dirname(__file__)\nDEBUG = os.environ.get(\"DEBUG\", \"\").lower() in (\"1\", \"true\")\nTEST_SEGFAULT_ERROR = len(os.environ.get(\"TEST_SEGFAULT_ERROR\", \"\")) > 0\nDATETIME_FORMAT = \"%d/%m/%Y - %H:%M:%S\"\nDATAPATH = configtools.get_module_data_path(MOD_NAME, \"data\")\nSHOTPATH = osp.join(\n configtools.get_module_data_path(MOD_NAME), os.pardir, \"doc\", \"images\", \"shots\"\n)\nOTHER_PLUGINS_PATHLIST = [configtools.get_module_data_path(MOD_NAME, \"plugins\")]\nIS_FROZEN = is_frozen(MOD_NAME)\nPLOTPY_DEFAULTS = {\n \"plot\": {\n # \"antialiasing\": False,\n # \"title/font/size\": 12,\n # \"title/font/bold\": False,\n # \"marker/curve/text/font/size\": 8,\n # \"marker/curve/text/font/family\": \"default\",\n # \"marker/curve/text/font/bold\": False,\n # \"marker/curve/text/font/italic\": False,\n \"marker/curve/text/textcolor\": \"black\",\n # \"marker/curve/text/background_color\": \"#ffffff\",\n # \"marker/curve/text/background_alpha\": 0.8,\n # \"marker/cross/text/font/family\": \"default\",\n # \"marker/cross/text/font/size\": 8,\n # \"marker/cross/text/font/bold\": False,\n # \"marker/cross/text/font/italic\": False,\n \"marker/cross/text/textcolor\": \"black\",\n # \"marker/cross/text/background_color\": \"#ffffff\",\n \"marker/cross/text/background_alpha\": 0.7,\n # \"marker/cross/line/style\": \"DashLine\",\n # \"marker/cross/line/color\": \"yellow\",\n # \"marker/cross/line/width\": 1,\n # \"marker/cursor/text/font/size\": 8,\n # \"marker/cursor/text/font/family\": \"default\",\n # \"marker/cursor/text/font/bold\": False,\n # \"marker/cursor/text/font/italic\": False,\n # \"marker/cursor/text/textcolor\": \"#ff9393\",\n # \"marker/cursor/text/background_color\": \"#ffffff\",\n # \"marker/cursor/text/background_alpha\": 0.8,\n \"shape/drag/symbol/marker\": \"NoSymbol\",\n \"shape/mask/symbol/size\": 5,\n \"shape/mask/sel_symbol/size\": 8,\n # -----------------------------------------------------------------------------\n # Annotated shape style for annotations:\n \"shape/annotation/line/style\": \"SolidLine\",\n \"shape/annotation/line/color\": \"#ffff00\",\n \"shape/annotation/line/width\": 1,\n \"shape/annotation/fill/style\": \"SolidPattern\",\n \"shape/annotation/fill/color\": MAIN_BG_COLOR,\n \"shape/annotation/fill/alpha\": 0.1,\n \"shape/annotation/symbol/marker\": \"Rect\",\n \"shape/annotation/symbol/size\": 3,\n \"shape/annotation/symbol/edgecolor\": \"#ffff00\",\n \"shape/annotation/symbol/facecolor\": \"#ffff00\",\n \"shape/annotation/symbol/alpha\": 1.0,\n \"shape/annotation/sel_line/style\": \"SolidLine\",\n \"shape/annotation/sel_line/color\": \"#00ff00\",\n 
\"shape/annotation/sel_line/width\": 1,\n \"shape/annotation/sel_fill/style\": \"SolidPattern\",\n \"shape/annotation/sel_fill/color\": MAIN_BG_COLOR,\n \"shape/annotation/sel_fill/alpha\": 0.1,\n \"shape/annotation/sel_symbol/marker\": \"Rect\",\n \"shape/annotation/sel_symbol/size\": 9,\n \"shape/annotation/sel_symbol/edgecolor\": \"#00aa00\",\n \"shape/annotation/sel_symbol/facecolor\": \"#00ff00\",\n \"shape/annotation/sel_symbol/alpha\": 0.7,\n # -----------------------------------------------------------------------------\n # Annotated shape style for result shapes / signals:\n \"shape/result/s/line/style\": \"SolidLine\",\n \"shape/result/s/line/color\": MAIN_FG_COLOR,\n \"shape/result/s/line/width\": 1,\n \"shape/result/s/fill/style\": \"SolidPattern\",\n \"shape/result/s/fill/color\": MAIN_BG_COLOR,\n \"shape/result/s/fill/alpha\": 0.1,\n \"shape/result/s/symbol/marker\": \"XCross\",\n \"shape/result/s/symbol/size\": 7,\n \"shape/result/s/symbol/edgecolor\": MAIN_FG_COLOR,\n \"shape/result/s/symbol/facecolor\": MAIN_FG_COLOR,\n \"shape/result/s/symbol/alpha\": 1.0,\n \"shape/result/s/sel_line/style\": \"SolidLine\",\n \"shape/result/s/sel_line/color\": \"#00ff00\",\n \"shape/result/s/sel_line/width\": 1,\n \"shape/result/s/sel_fill/style\": \"SolidPattern\",\n \"shape/result/s/sel_fill/color\": MAIN_BG_COLOR,\n \"shape/result/s/sel_fill/alpha\": 0.1,\n \"shape/result/s/sel_symbol/marker\": \"Rect\",\n \"shape/result/s/sel_symbol/size\": 9,\n \"shape/result/s/sel_symbol/edgecolor\": \"#00aa00\",\n \"shape/result/s/sel_symbol/facecolor\": \"#00ff00\",\n \"shape/result/s/sel_symbol/alpha\": 0.7,\n # -----------------------------------------------------------------------------\n # Annotated shape style for result shapes / images:\n \"shape/result/i/line/style\": \"SolidLine\",\n \"shape/result/i/line/color\": \"#ffff00\",\n \"shape/result/i/line/width\": 1,\n \"shape/result/i/fill/style\": \"SolidPattern\",\n \"shape/result/i/fill/color\": MAIN_BG_COLOR,\n \"shape/result/i/fill/alpha\": 0.1,\n \"shape/result/i/symbol/marker\": \"Rect\",\n \"shape/result/i/symbol/size\": 3,\n \"shape/result/i/symbol/edgecolor\": \"#ffff00\",\n \"shape/result/i/symbol/facecolor\": \"#ffff00\",\n \"shape/result/i/symbol/alpha\": 1.0,\n \"shape/result/i/sel_line/style\": \"SolidLine\",\n \"shape/result/i/sel_line/color\": \"#00ff00\",\n \"shape/result/i/sel_line/width\": 1,\n \"shape/result/i/sel_fill/style\": \"SolidPattern\",\n \"shape/result/i/sel_fill/color\": MAIN_BG_COLOR,\n \"shape/result/i/sel_fill/alpha\": 0.1,\n \"shape/result/i/sel_symbol/marker\": \"Rect\",\n \"shape/result/i/sel_symbol/size\": 9,\n \"shape/result/i/sel_symbol/edgecolor\": \"#00aa00\",\n \"shape/result/i/sel_symbol/facecolor\": \"#00ff00\",\n \"shape/result/i/sel_symbol/alpha\": 0.7,\n # -----------------------------------------------------------------------------\n },\n}\ndef is_frozen(module_name: str) -> bool:\ndef get_mod_source_dir() -> str | None:\n def get_def_dict(cls, category: str) -> dict:\n def set_def_dict(cls, category: str, def_dict: dict) -> None:\ndef get_old_log_fname(fname):\ndef initialize():\ndef reset():\nclass MainSection(conf.Section, metaclass=conf.SectionMeta):\nclass ConsoleSection(conf.Section, metaclass=conf.SectionMeta):\nclass IOSection(conf.Section, metaclass=conf.SectionMeta):\nclass ProcSection(conf.Section, metaclass=conf.SectionMeta):\nclass ViewSection(conf.Section, metaclass=conf.SectionMeta):\nclass Conf(conf.Configuration, metaclass=conf.ConfMeta):" }, { "identifier": "ClipParam", 
"path": "cdl/core/computation/base.py", "snippet": "class ClipParam(gds.DataSet):\n \"\"\"Data clipping parameters\"\"\"\n\n value = gds.FloatItem(_(\"Clipping value\"))" }, { "identifier": "FFTParam", "path": "cdl/core/computation/base.py", "snippet": "class FFTParam(gds.DataSet):\n \"\"\"FFT parameters\"\"\"\n\n shift = gds.BoolItem(_(\"Shift\"), help=_(\"Shift zero frequency to center\"))" }, { "identifier": "GaussianParam", "path": "cdl/core/computation/base.py", "snippet": "class GaussianParam(gds.DataSet):\n \"\"\"Gaussian filter parameters\"\"\"\n\n sigma = gds.FloatItem(\"σ\", default=1.0)" }, { "identifier": "MovingAverageParam", "path": "cdl/core/computation/base.py", "snippet": "class MovingAverageParam(gds.DataSet):\n \"\"\"Moving average parameters\"\"\"\n\n n = gds.IntItem(_(\"Size of the moving window\"), default=3, min=1)" }, { "identifier": "MovingMedianParam", "path": "cdl/core/computation/base.py", "snippet": "class MovingMedianParam(gds.DataSet):\n \"\"\"Moving median parameters\"\"\"\n\n n = gds.IntItem(_(\"Size of the moving window\"), default=3, min=1, even=False)" }, { "identifier": "ThresholdParam", "path": "cdl/core/computation/base.py", "snippet": "class ThresholdParam(gds.DataSet):\n \"\"\"Threshold parameters\"\"\"\n\n value = gds.FloatItem(_(\"Threshold\"))" }, { "identifier": "SignalObj", "path": "cdl/core/model/signal.py", "snippet": "class SignalObj(gds.DataSet, base.BaseObj):\n \"\"\"Signal object\"\"\"\n\n PREFIX = \"s\"\n CONF_FMT = Conf.view.sig_format\n DEFAULT_FMT = \"g\"\n VALID_DTYPES = (np.float32, np.float64, np.complex128)\n\n uuid = gds.StringItem(\"UUID\").set_prop(\"display\", hide=True)\n\n _tabs = gds.BeginTabGroup(\"all\")\n\n _datag = gds.BeginGroup(_(\"Data and metadata\"))\n title = gds.StringItem(_(\"Signal title\"), default=_(\"Untitled\"))\n xydata = gds.FloatArrayItem(_(\"Data\"), transpose=True, minmax=\"rows\")\n metadata = gds.DictItem(_(\"Metadata\"), default={})\n _e_datag = gds.EndGroup(_(\"Data and metadata\"))\n\n _unitsg = gds.BeginGroup(_(\"Titles and units\"))\n title = gds.StringItem(_(\"Signal title\"), default=_(\"Untitled\"))\n _tabs_u = gds.BeginTabGroup(\"units\")\n _unitsx = gds.BeginGroup(_(\"X-axis\"))\n xlabel = gds.StringItem(_(\"Title\"), default=\"\")\n xunit = gds.StringItem(_(\"Unit\"), default=\"\")\n _e_unitsx = gds.EndGroup(_(\"X-axis\"))\n _unitsy = gds.BeginGroup(_(\"Y-axis\"))\n ylabel = gds.StringItem(_(\"Title\"), default=\"\")\n yunit = gds.StringItem(_(\"Unit\"), default=\"\")\n _e_unitsy = gds.EndGroup(_(\"Y-axis\"))\n _e_tabs_u = gds.EndTabGroup(\"units\")\n _e_unitsg = gds.EndGroup(_(\"Titles and units\"))\n\n _e_tabs = gds.EndTabGroup(\"all\")\n\n def __init__(self, title=None, comment=None, icon=\"\"):\n \"\"\"Constructor\n\n Args:\n title (str): title\n comment (str): comment\n icon (str): icon\n \"\"\"\n gds.DataSet.__init__(self, title, comment, icon)\n base.BaseObj.__init__(self)\n self.regenerate_uuid()\n\n def regenerate_uuid(self):\n \"\"\"Regenerate UUID\n\n This method is used to regenerate UUID after loading the object from a file.\n This is required to avoid UUID conflicts when loading objects from file\n without clearing the workspace first.\n \"\"\"\n self.uuid = str(uuid4())\n\n def copy(\n self, title: str | None = None, dtype: np.dtype | None = None\n ) -> SignalObj:\n \"\"\"Copy object.\n\n Args:\n title (str): title\n dtype (numpy.dtype): data type\n\n Returns:\n SignalObj: copied object\n \"\"\"\n title = self.title if title is None else title\n obj = 
SignalObj(title=title)\n obj.title = title\n if dtype not in (None, float, complex, np.complex128):\n raise RuntimeError(\"Signal data only supports float64/complex128 dtype\")\n obj.metadata = deepcopy(self.metadata)\n obj.xydata = np.array(self.xydata, copy=True, dtype=dtype)\n return obj\n\n def set_data_type(self, dtype: np.dtype) -> None: # pylint: disable=unused-argument\n \"\"\"Change data type.\n\n Args:\n dtype (numpy.dtype): data type\n \"\"\"\n raise RuntimeError(\"Setting data type is not support for signals\")\n\n def set_xydata(\n self,\n x: np.ndarray | list,\n y: np.ndarray | list,\n dx: np.ndarray | list | None = None,\n dy: np.ndarray | list | None = None,\n ) -> None:\n \"\"\"Set xy data\n\n Args:\n x (numpy.ndarray): x data\n y (numpy.ndarray): y data\n dx (numpy.ndarray): dx data (optional: error bars)\n dy (numpy.ndarray): dy data (optional: error bars)\n \"\"\"\n if x is not None:\n x = np.array(x)\n if y is not None:\n y = np.array(y)\n if dx is not None:\n dx = np.array(dx)\n if dy is not None:\n dy = np.array(dy)\n if dx is None and dy is None:\n self.xydata = np.vstack([x, y])\n else:\n if dx is None:\n dx = np.zeros_like(dy)\n if dy is None:\n dy = np.zeros_like(dx)\n self.xydata = np.vstack((x, y, dx, dy))\n\n def __get_x(self) -> np.ndarray | None:\n \"\"\"Get x data\"\"\"\n if self.xydata is not None:\n return self.xydata[0]\n return None\n\n def __set_x(self, data) -> None:\n \"\"\"Set x data\"\"\"\n self.xydata[0] = np.array(data)\n\n def __get_y(self) -> np.ndarray | None:\n \"\"\"Get y data\"\"\"\n if self.xydata is not None:\n return self.xydata[1]\n return None\n\n def __set_y(self, data) -> None:\n \"\"\"Set y data\"\"\"\n self.xydata[1] = np.array(data)\n\n def __get_dx(self) -> np.ndarray | None:\n \"\"\"Get dx data\"\"\"\n if self.xydata is not None and len(self.xydata) > 2:\n return self.xydata[2]\n return None\n\n def __set_dx(self, data) -> None:\n \"\"\"Set dx data\"\"\"\n if self.xydata is not None and len(self.xydata) > 2:\n self.xydata[2] = np.array(data)\n else:\n raise ValueError(\"dx data not available\")\n\n def __get_dy(self) -> np.ndarray | None:\n \"\"\"Get dy data\"\"\"\n if self.xydata is not None and len(self.xydata) > 3:\n return self.xydata[3]\n return None\n\n def __set_dy(self, data) -> None:\n \"\"\"Set dy data\"\"\"\n if self.xydata is not None and len(self.xydata) > 3:\n self.xydata[3] = np.array(data)\n else:\n raise ValueError(\"dy data not available\")\n\n x = property(__get_x, __set_x)\n y = data = property(__get_y, __set_y)\n dx = property(__get_dx, __set_dx)\n dy = property(__get_dy, __set_dy)\n\n def get_data(self, roi_index: int | None = None) -> np.ndarray:\n \"\"\"\n Return original data (if ROI is not defined or `roi_index` is None),\n or ROI data (if both ROI and `roi_index` are defined).\n\n Args:\n roi_index (int): ROI index\n\n Returns:\n numpy.ndarray: data\n \"\"\"\n if self.roi is None or roi_index is None:\n return self.x, self.y\n i1, i2 = self.roi[roi_index, :]\n return self.x[i1:i2], self.y[i1:i2]\n\n def update_plot_item_parameters(self, item: CurveItem) -> None:\n \"\"\"Update plot item parameters from object data/metadata\n\n Takes into account a subset of plot item parameters. Those parameters may\n have been overriden by object metadata entries or other object data. 
The goal\n is to update the plot item accordingly.\n\n This is *almost* the inverse operation of `update_metadata_from_plot_item`.\n\n Args:\n item: plot item\n \"\"\"\n update_dataset(item.param.line, self.metadata)\n update_dataset(item.param.symbol, self.metadata)\n super().update_plot_item_parameters(item)\n\n def update_metadata_from_plot_item(self, item: CurveItem) -> None:\n \"\"\"Update metadata from plot item.\n\n Takes into account a subset of plot item parameters. Those parameters may\n have been modified by the user through the plot item GUI. The goal is to\n update the metadata accordingly.\n\n This is *almost* the inverse operation of `update_plot_item_parameters`.\n\n Args:\n item: plot item\n \"\"\"\n super().update_metadata_from_plot_item(item)\n restore_dataset(item.param.line, self.metadata)\n restore_dataset(item.param.symbol, self.metadata)\n\n def make_item(self, update_from: CurveItem = None) -> CurveItem:\n \"\"\"Make plot item from data.\n\n Args:\n update_from (CurveItem): plot item to update from\n\n Returns:\n CurveItem: plot item\n \"\"\"\n if len(self.xydata) in (2, 3, 4):\n if len(self.xydata) == 2: # x, y signal\n x, y = self.xydata\n item = make.mcurve(x.real, y.real, label=self.title)\n elif len(self.xydata) == 3: # x, y, dy error bar signal\n x, y, dy = self.xydata\n item = make.merror(x.real, y.real, dy.real, label=self.title)\n elif len(self.xydata) == 4: # x, y, dx, dy error bar signal\n x, y, dx, dy = self.xydata\n item = make.merror(x.real, y.real, dx.real, dy.real, label=self.title)\n CurveStyles.apply_style(item.param)\n else:\n raise RuntimeError(\"data not supported\")\n if update_from is None:\n if execenv.demo_mode:\n item.param.line.width = 3\n self.update_plot_item_parameters(item)\n else:\n update_dataset(item.param, update_from.param)\n item.update_params()\n return item\n\n def update_item(self, item: CurveItem, data_changed: bool = True) -> None:\n \"\"\"Update plot item from data.\n\n Args:\n item (CurveItem): plot item\n data_changed (bool): if True, data has changed\n \"\"\"\n if data_changed:\n if len(self.xydata) == 2: # x, y signal\n x, y = self.xydata\n item.set_data(x.real, y.real)\n elif len(self.xydata) == 3: # x, y, dy error bar signal\n x, y, dy = self.xydata\n item.set_data(x.real, y.real, dy=dy.real)\n elif len(self.xydata) == 4: # x, y, dx, dy error bar signal\n x, y, dx, dy = self.xydata\n item.set_data(x.real, y.real, dx.real, dy.real)\n item.param.label = self.title\n self.update_plot_item_parameters(item)\n\n def roi_coords_to_indexes(self, coords: list) -> np.ndarray:\n \"\"\"Convert ROI coordinates to indexes.\n\n Args:\n coords (list): coordinates\n\n Returns:\n numpy.ndarray: indexes\n \"\"\"\n indexes = np.array(coords, int)\n for row in range(indexes.shape[0]):\n for col in range(indexes.shape[1]):\n x0 = coords[row][col]\n indexes[row, col] = np.abs(self.x - x0).argmin()\n return indexes\n\n def get_roi_param(self, title: str, *defaults) -> gds.DataSet:\n \"\"\"Return ROI parameters dataset.\n\n Args:\n title (str): title\n *defaults: default values\n \"\"\"\n imax = len(self.x) - 1\n i0, i1 = defaults\n param = ROIParam(title)\n param.col1 = i0\n param.col2 = i1\n param.set_global_prop(\"data\", min=-1, max=imax)\n return param\n\n @staticmethod\n def params_to_roidata(params: gds.DataSetGroup) -> np.ndarray:\n \"\"\"Convert ROI dataset group to ROI array data.\n\n Args:\n params (DataSetGroup): ROI dataset group\n\n Returns:\n numpy.ndarray: ROI array data\n \"\"\"\n roilist = []\n for roiparam in 
params.datasets:\n roiparam: ROIParam\n roilist.append([roiparam.col1, roiparam.col2])\n if len(roilist) == 0:\n return None\n return np.array(roilist, int)\n\n def new_roi_item(self, fmt: str, lbl: bool, editable: bool):\n \"\"\"Return a new ROI item from scratch\n\n Args:\n fmt (str): format string\n lbl (bool): if True, add label\n editable (bool): if True, ROI is editable\n \"\"\"\n coords = self.x.min(), self.x.max()\n return base.make_roi_item(\n lambda x, y, _title: make.range(x, y),\n coords,\n \"ROI\",\n fmt,\n lbl,\n editable,\n option=\"shape/drag\",\n )\n\n def iterate_roi_items(self, fmt: str, lbl: bool, editable: bool = True):\n \"\"\"Make plot item representing a Region of Interest.\n\n Args:\n fmt (str): format string\n lbl (bool): if True, add label\n editable (bool): if True, ROI is editable\n\n Yields:\n PlotItem: plot item\n \"\"\"\n if self.roi is not None:\n for index, coords in enumerate(self.x[self.roi]):\n yield base.make_roi_item(\n lambda x, y, _title: make.range(x, y),\n coords,\n f\"ROI{index:02d}\",\n fmt,\n lbl,\n editable,\n option=\"shape/drag\",\n )\n\n def add_label_with_title(self, title: str | None = None) -> None:\n \"\"\"Add label with title annotation\n\n Args:\n title (str): title (if None, use signal title)\n \"\"\"\n title = self.title if title is None else title\n if title:\n label = make.label(title, \"TL\", (0, 0), \"TL\")\n self.add_annotations_from_items([label])" } ]
import guidata.dataset as gds
import numpy as np
import scipy.integrate as spt
import scipy.ndimage as spi
import scipy.optimize as spo
import scipy.signal as sps
from cdl.algorithms import fit
from cdl.algorithms.signal import (
    derivative,
    interpolate,
    moving_average,
    normalize,
    peak_indexes,
    xpeak,
    xy_fft,
    xy_ifft,
)
from cdl.config import _
from cdl.core.computation.base import (
    ClipParam,
    FFTParam,
    GaussianParam,
    MovingAverageParam,
    MovingMedianParam,
    ThresholdParam,
)
from cdl.core.model.signal import SignalObj
10,723
return dst def compute_division(src1: SignalObj, src2: SignalObj) -> SignalObj: """Compute division between two signals Args: src1 (SignalObj): source signal 1 src2 (SignalObj): source signal 2 Returns: SignalObj: result signal object """ dst = dst_n1n(src1, src2, "division") x1, y1 = src1.get_data() _x2, y2 = src2.get_data() dst.set_xydata(x1, y1 / np.array(y2, dtype=y1.dtype)) return dst # -------- compute_11 functions -------------------------------------------------------- # Functions with 1 input image and 1 output image # -------------------------------------------------------------------------------------- def extract_multiple_roi(src: SignalObj, group: gds.DataSetGroup) -> SignalObj: """Extract multiple regions of interest from data Args: src (SignalObj): source signal group (gds.DataSetGroup): group of parameters Returns: SignalObj: signal with multiple regions of interest """ suffix = None if len(group.datasets) == 1: p = group.datasets[0] suffix = f"indexes={p.col1:d}:{p.col2:d}" dst = dst_11(src, "extract_multiple_roi", suffix) x, y = src.get_data() xout, yout = np.ones_like(x) * np.nan, np.ones_like(y) * np.nan for p in group.datasets: slice0 = slice(p.col1, p.col2 + 1) xout[slice0], yout[slice0] = x[slice0], y[slice0] nans = np.isnan(xout) | np.isnan(yout) dst.set_xydata(xout[~nans], yout[~nans]) # TODO: [P2] Instead of removing geometric shapes, apply roi extract dst.remove_all_shapes() return dst def extract_single_roi(src: SignalObj, p: gds.DataSet) -> SignalObj: """Extract single region of interest from data Args: src (SignalObj): source signal p (gds.DataSet): parameters Returns: SignalObj: signal with single region of interest """ dst = dst_11(src, "extract_single_roi", f"indexes={p.col1:d}:{p.col2:d}") x, y = src.get_data() dst.set_xydata(x[p.col1 : p.col2 + 1], y[p.col1 : p.col2 + 1]) # TODO: [P2] Instead of removing geometric shapes, apply roi extract dst.remove_all_shapes() return dst def compute_swap_axes(src: SignalObj) -> SignalObj: """Swap axes Args: src (SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "swap_axes") x, y = src.get_data() dst.set_xydata(y, x) return dst def compute_abs(src: SignalObj) -> SignalObj: """Compute absolute value Args: src (SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "abs") x, y = src.get_data() dst.set_xydata(x, np.abs(y)) return dst def compute_re(src: SignalObj) -> SignalObj: """Compute real part Args: src (SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "re") x, y = src.get_data() dst.set_xydata(x, np.real(y)) return dst def compute_im(src: SignalObj) -> SignalObj: """Compute imaginary part Args: src (SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "im") x, y = src.get_data() dst.set_xydata(x, np.imag(y)) return dst class DataTypeSParam(gds.DataSet): """Convert signal data type parameters""" dtype_str = gds.ChoiceItem(
# -*- coding: utf-8 -*- # # Licensed under the terms of the BSD 3-Clause # (see cdl/LICENSE for details) """ .. Signal computation objects (see parent package :mod:`cdl.core.computation`) """ # pylint: disable=invalid-name # Allows short reference names like x, y, ... # Note: # ---- # All dataset classes must also be imported in the cdl.core.computation.param module. from __future__ import annotations VALID_DTYPES_STRLIST = SignalObj.get_valid_dtypenames() def dst_11(src: SignalObj, name: str, suffix: str | None = None) -> SignalObj: """Create result signal object for compute_11 function Args: src (SignalObj): source signal name (str): name of the function Returns: SignalObj: result signal object """ dst = src.copy(title=f"{name}({src.short_id})") if suffix is not None: dst.title += "|" + suffix return dst def dst_n1n(src1: SignalObj, src2: SignalObj, name: str, suffix: str | None = None): """Create result signal object for compute_n1n function Args: src1 (SignalObj): source signal 1 src2 (SignalObj): source signal 2 name (str): name of the function Returns: SignalObj: result signal object """ dst = src1.copy(title=f"{name}({src1.short_id}, {src2.short_id})") if suffix is not None: dst.title += "|" + suffix return dst # -------- compute_n1 functions -------------------------------------------------------- # Functions with N input signals and 1 output signal # -------------------------------------------------------------------------------------- # Those functions are perfoming a computation on N input signals and return a single # output signal. If we were only executing these functions locally, we would not need # to define them here, but since we are using the multiprocessing module, we need to # define them here so that they can be pickled and sent to the worker processes. # Also, we need to systematically return the output signal object, even if it is already # modified in place, because the multiprocessing module will not be able to retrieve # the modified object from the worker processes. 
def compute_add(dst: SignalObj, src: SignalObj) -> SignalObj: """Add signal to result signal Args: dst (SignalObj): destination signal src (SignalObj): source signal """ dst.y += np.array(src.y, dtype=dst.y.dtype) if dst.dy is not None: dst.dy = np.sqrt(dst.dy**2 + src.dy**2) return dst def compute_product(dst: SignalObj, src: SignalObj) -> SignalObj: """Multiply signal to result signal Args: dst (SignalObj): destination signal src (SignalObj): source signal """ dst.y *= np.array(src.y, dtype=dst.y.dtype) if dst.dy is not None: dst.dy = dst.y * np.sqrt((dst.dy / dst.y) ** 2 + (src.dy / src.y) ** 2) return dst # -------- compute_n1n functions ------------------------------------------------------- # Functions with N input images + 1 input image and N output images # -------------------------------------------------------------------------------------- def compute_difference(src1: SignalObj, src2: SignalObj) -> SignalObj: """Compute difference between two signals Args: src1 (SignalObj): source signal 1 src2 (SignalObj): source signal 2 Returns: SignalObj: result signal object """ dst = dst_n1n(src1, src2, "difference") dst.y = src1.y - src2.y if dst.dy is not None: dst.dy = np.sqrt(src1.dy**2 + src2.dy**2) return dst def compute_quadratic_difference(src1: SignalObj, src2: SignalObj) -> SignalObj: """Compute quadratic difference between two signals Args: src1 (SignalObj): source signal 1 src2 (SignalObj): source signal 2 Returns: SignalObj: result signal object """ dst = dst_n1n(src1, src2, "quadratic_difference") x1, y1 = src1.get_data() _x2, y2 = src2.get_data() dst.set_xydata(x1, (y1 - np.array(y2, dtype=y1.dtype)) / np.sqrt(2.0)) if np.issubdtype(dst.data.dtype, np.unsignedinteger): dst.data[src1.data < src2.data] = 0 if dst.dy is not None: dst.dy = np.sqrt(src1.dy**2 + src2.dy**2) return dst def compute_division(src1: SignalObj, src2: SignalObj) -> SignalObj: """Compute division between two signals Args: src1 (SignalObj): source signal 1 src2 (SignalObj): source signal 2 Returns: SignalObj: result signal object """ dst = dst_n1n(src1, src2, "division") x1, y1 = src1.get_data() _x2, y2 = src2.get_data() dst.set_xydata(x1, y1 / np.array(y2, dtype=y1.dtype)) return dst # -------- compute_11 functions -------------------------------------------------------- # Functions with 1 input image and 1 output image # -------------------------------------------------------------------------------------- def extract_multiple_roi(src: SignalObj, group: gds.DataSetGroup) -> SignalObj: """Extract multiple regions of interest from data Args: src (SignalObj): source signal group (gds.DataSetGroup): group of parameters Returns: SignalObj: signal with multiple regions of interest """ suffix = None if len(group.datasets) == 1: p = group.datasets[0] suffix = f"indexes={p.col1:d}:{p.col2:d}" dst = dst_11(src, "extract_multiple_roi", suffix) x, y = src.get_data() xout, yout = np.ones_like(x) * np.nan, np.ones_like(y) * np.nan for p in group.datasets: slice0 = slice(p.col1, p.col2 + 1) xout[slice0], yout[slice0] = x[slice0], y[slice0] nans = np.isnan(xout) | np.isnan(yout) dst.set_xydata(xout[~nans], yout[~nans]) # TODO: [P2] Instead of removing geometric shapes, apply roi extract dst.remove_all_shapes() return dst def extract_single_roi(src: SignalObj, p: gds.DataSet) -> SignalObj: """Extract single region of interest from data Args: src (SignalObj): source signal p (gds.DataSet): parameters Returns: SignalObj: signal with single region of interest """ dst = dst_11(src, "extract_single_roi", 
f"indexes={p.col1:d}:{p.col2:d}") x, y = src.get_data() dst.set_xydata(x[p.col1 : p.col2 + 1], y[p.col1 : p.col2 + 1]) # TODO: [P2] Instead of removing geometric shapes, apply roi extract dst.remove_all_shapes() return dst def compute_swap_axes(src: SignalObj) -> SignalObj: """Swap axes Args: src (SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "swap_axes") x, y = src.get_data() dst.set_xydata(y, x) return dst def compute_abs(src: SignalObj) -> SignalObj: """Compute absolute value Args: src (SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "abs") x, y = src.get_data() dst.set_xydata(x, np.abs(y)) return dst def compute_re(src: SignalObj) -> SignalObj: """Compute real part Args: src (SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "re") x, y = src.get_data() dst.set_xydata(x, np.real(y)) return dst def compute_im(src: SignalObj) -> SignalObj: """Compute imaginary part Args: src (SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "im") x, y = src.get_data() dst.set_xydata(x, np.imag(y)) return dst class DataTypeSParam(gds.DataSet): """Convert signal data type parameters""" dtype_str = gds.ChoiceItem(
_("Destination data type"),
9
2023-11-09 16:56:03+00:00
12k
choderalab/chiron
chiron/tests/test_pairs.py
[ { "identifier": "NeighborListNsqrd", "path": "chiron/neighbors.py", "snippet": "class NeighborListNsqrd(PairsBase):\n \"\"\"\n N^2 neighborlist implementation that returns the particle pair ids, displacement vectors, and distances.\n\n Parameters\n ----------\n space: Space\n Class that defines how to calculate the displacement between two points and apply the boundary conditions\n cutoff: float, default = 2.5\n Cutoff distance for the neighborlist\n skin: float, default = 0.4\n Skin distance for the neighborlist\n n_max_neighbors: int, default=200\n Maximum number of neighbors for each particle. Used for padding arrays for efficient jax computations\n This will be checked and dynamically updated during the build stage\n Examples\n --------\n\n\n \"\"\"\n\n def __init__(\n self,\n space: Space,\n cutoff: unit.Quantity = unit.Quantity(1.2, unit.nanometer),\n skin: unit.Quantity = unit.Quantity(0.4, unit.nanometer),\n n_max_neighbors: float = 200,\n ):\n if not isinstance(space, Space):\n raise TypeError(f\"space must be of type Space, found {type(space)}\")\n if not cutoff.unit.is_compatible(unit.angstrom):\n raise ValueError(\n f\"cutoff must be a unit.Quantity with units of distance, cutoff.unit = {cutoff.unit}\"\n )\n if not skin.unit.is_compatible(unit.angstrom):\n raise ValueError(\n f\"cutoff must be a unit.Quantity with units of distance, skin.unit = {skin.unit}\"\n )\n\n self.cutoff = cutoff.value_in_unit_system(unit.md_unit_system)\n self.skin = skin.value_in_unit_system(unit.md_unit_system)\n self.cutoff_and_skin = self.cutoff + self.skin\n self.n_max_neighbors = n_max_neighbors\n self.space = space\n\n # set a a simple variable to know if this has at least been built once as opposed to just initialized\n # this does not imply that the neighborlist is up to date\n self.is_built = False\n\n # note, we need to use the partial decorator in order to use the jit decorate\n # so that it knows to ignore the `self` argument\n @partial(jax.jit, static_argnums=(0,))\n def _pairs_mask(self, particle_ids: jnp.array):\n \"\"\"\n Jitted function to generate mask that allows us to remove self-interactions and double-counting of pairs\n\n Parameters\n ----------\n particle_ids: jnp.array\n Array of particle ids\n\n Returns\n -------\n jnp.array\n Bool mask to remove self-interactions and double-counting of pairs\n\n \"\"\"\n # for the nsq approach, we consider the distance between a particle and all other particles in the system\n # if we used a cell list the possible_neighbors would be a smaller list, i.e., only those in the neigboring cells\n\n possible_neighbors = particle_ids\n\n particles_j = jnp.broadcast_to(\n possible_neighbors,\n (particle_ids.shape[0], possible_neighbors.shape[0]),\n )\n\n # reshape the particle_ids\n particles_i = jnp.reshape(particle_ids, (particle_ids.shape[0], 1))\n # create a mask to exclude self interactions and double counting\n temp_mask = particles_i < particles_j\n\n return temp_mask\n\n @partial(jax.jit, static_argnums=(0, 5))\n def _build_neighborlist(\n self, particle_i, reduction_mask, pid, coordinates, n_max_neighbors\n ):\n \"\"\"\n Jitted function to build the neighbor list for a single particle\n\n Parameters\n ----------\n particle_i: jnp.array\n X,Y,Z coordinates of particle i\n reduction_mask: jnp.array\n Mask to exclude self-interactions and double counting of pairs\n coordinates: jnp.array\n X,Y,Z coordinates of all particles\n n_max_neighbors: int\n Maximum number of neighbors for each particle. 
Used for padding arrays for efficient jax computations\n\n Returns\n -------\n neighbor_list_mask: jnp.array\n Mask to exclude padding from the neighbor list\n neighbor_list: jnp.array\n List of particle ids for the neighbors, padded to n_max_neighbors\n n_neighbors: int\n Number of neighbors for the particle\n \"\"\"\n\n # calculate the displacement between particle i and all other particles\n r_ij, dist = self.space.displacement(particle_i, coordinates)\n\n # neighbor_mask will be an array of length n_particles (i.e., length of coordinates)\n # where each element is True if the particle is a neighbor, False if it is not\n # subject to both the cutoff+skin and the reduction mask that eliminates double counting and self-interactions\n neighbor_mask = jnp.where(\n (dist < self.cutoff_and_skin) & (reduction_mask), True, False\n )\n # when we pad the neighbor list, we will use last particle id in the neighbor list\n # this choice was made such that when we use the neighbor list in the masked energy calculat\n # the padded values will result in reasonably well defined values\n fill_value = jnp.argmax(neighbor_mask)\n fill_value = jnp.where(fill_value == pid, fill_value + 1, fill_value)\n\n # count up the number of neighbors\n n_neighbors = jnp.where(neighbor_mask, 1, 0).sum()\n\n # since neighbor_mask indices have a one-to-one correspondence to particle ids,\n # applying jnp.where, will return an array of the indices that are neighbors.\n # since this needs to be uniformly sized, we can just fill this array up to the n_max_neighbors.\n neighbor_list = jnp.array(\n jnp.where(neighbor_mask, size=n_max_neighbors, fill_value=fill_value),\n dtype=jnp.uint32,\n )\n # we need to generate a new mask associatd with the padded neighbor list\n # to be able to quickly exclude the padded values from the neighbor list\n neighbor_list_mask = jnp.where(jnp.arange(n_max_neighbors) < n_neighbors, 1, 0)\n\n del r_ij, dist\n return neighbor_list_mask, neighbor_list, n_neighbors\n\n def build(\n self,\n coordinates: Union[jnp.array, unit.Quantity],\n box_vectors: Union[jnp.array, unit.Quantity],\n ):\n \"\"\"\n Build the neighborlist from an array of coordinates and box vectors.\n\n Parameters\n ----------\n coordinates: jnp.array\n Shape[N,3] array of particle coordinates\n box_vectors: jnp.array\n Shape[3,3] array of box vectors\n\n Returns\n -------\n None\n\n \"\"\"\n\n # set our reference coordinates\n # the call to x0 and box_vectors automatically convert these to jnp arrays in the correct unit system\n if isinstance(coordinates, unit.Quantity):\n if not coordinates.unit.is_compatible(unit.nanometer):\n raise ValueError(\n f\"Coordinates require distance units, not {coordinates.unit}\"\n )\n coordinates = coordinates.value_in_unit_system(unit.md_unit_system)\n\n if isinstance(box_vectors, unit.Quantity):\n if not box_vectors.unit.is_compatible(unit.nanometer):\n raise ValueError(\n f\"Box vectors require distance unit, not {box_vectors.unit}\"\n )\n box_vectors = box_vectors.value_in_unit_system(unit.md_unit_system)\n\n if box_vectors.shape != (3, 3):\n raise ValueError(\n f\"box_vectors should be a 3x3 array, shape provided: {box_vectors.shape}\"\n )\n\n self.ref_coordinates = coordinates\n self.box_vectors = box_vectors\n\n # the neighborlist assumes that the box vectors do not change between building and calculating the neighbor list\n # changes to the box vectors require rebuilding the neighbor list\n self.space.box_vectors = self.box_vectors\n\n # store the ids of all the particles\n self.particle_ids = 
jnp.array(\n range(0, self.ref_coordinates.shape[0]), dtype=jnp.uint32\n )\n\n # calculate which pairs to exclude\n reduction_mask = self._pairs_mask(self.particle_ids)\n\n # calculate the distance for all pairs this will return\n # neighbor_mask: an array of shape (n_particles, n_particles) where each element is the mask\n # to determine if the particle is a neighbor\n # neighbor_list: an array of shape (n_particles, n_max_neighbors) where each element is the particle id of the neighbor\n # this is padded with zeros to ensure a uniform size;\n # n_neighbors: an array of shape (n_particles) where each element is the number of neighbors for that particle\n\n self.neighbor_mask, self.neighbor_list, self.n_neighbors = jax.vmap(\n self._build_neighborlist, in_axes=(0, 0, 0, None, None)\n )(\n self.ref_coordinates,\n reduction_mask,\n self.particle_ids,\n self.ref_coordinates,\n self.n_max_neighbors,\n )\n\n self.neighbor_list = self.neighbor_list.reshape(-1, self.n_max_neighbors)\n\n while jnp.any(self.n_neighbors == self.n_max_neighbors).block_until_ready():\n log.debug(\n f\"Increasing n_max_neighbors from {self.n_max_neighbors} to at {jnp.max(self.n_neighbors)+10}\"\n )\n self.n_max_neighbors = int(jnp.max(self.n_neighbors) + 10)\n\n self.neighbor_mask, self.neighbor_list, self.n_neighbors = jax.vmap(\n self._build_neighborlist, in_axes=(0, 0, 0, None, None)\n )(\n self.ref_coordinates,\n reduction_mask,\n self.particle_ids,\n self.ref_coordinates,\n self.n_max_neighbors,\n )\n\n self.neighbor_list = self.neighbor_list.reshape(-1, self.n_max_neighbors)\n\n self.is_built = True\n\n @partial(jax.jit, static_argnums=(0,))\n def _calc_distance_per_particle(\n self, particle1, neighbors, neighbor_mask, coordinates\n ):\n \"\"\"\n Jitted function to calculate the distance between a particle and its neighbors\n\n Parameters\n ----------\n particle1: int\n Particle id\n neighbors: jnp.array\n Array of particle ids for the neighbors of particle1\n neighbor_mask: jnp.array\n Mask to exclude padding from the neighbor list of particle1\n coordinates: jnp.array\n X,Y,Z coordinates of all particles\n\n Returns\n -------\n n_pairs: int\n Number of interacting pairs for the particle\n mask: jnp.array\n Mask to exclude padding from the neighbor list of particle1.\n If a particle is within the interaction cutoff, the mask is 1, otherwise it is 0\n dist: jnp.array\n Array of distances between the particle and its neighbors\n r_ij: jnp.array\n Array of displacement vectors between the particle and its neighbors\n \"\"\"\n # repeat the particle id for each neighbor\n particles1 = jnp.repeat(particle1, neighbors.shape[0])\n\n # calculate the displacement between particle i and all neighbors\n r_ij, dist = self.space.displacement(\n coordinates[particles1], coordinates[neighbors]\n )\n # calculate the mask to determine if the particle is a neighbor\n # this will be done based on the interaction cutoff and using the neighbor_mask to exclude padding\n mask = jnp.where((dist < self.cutoff) & (neighbor_mask), 1, 0)\n\n # calculate the number of pairs\n n_pairs = mask.sum()\n\n return n_pairs, mask, dist, r_ij\n\n def calculate(self, coordinates: jnp.array):\n \"\"\"\n Calculate the neighbor list for the current state\n\n Parameters\n ----------\n coordinates: jnp.array\n Shape[N,3] array of particle coordinates\n\n Returns\n -------\n n_neighbors: jnp.array\n Array of number of neighbors for each particle\n neighbor_list: jnp.array\n Array of particle ids for the neighbors, padded to n_max_neighbors. 
Shape (n_particles, n_max_neighbors)\n padding_mask: jnp.array\n Array of masks to exclude padding from the neighbor list of each particle. Shape (n_particles, n_max_neighbors)\n dist: jnp.array\n Array of distances between each particle and its neighbors. Shape (n_particles, n_max_neighbors)\n r_ij: jnp.array\n Array of displacement vectors between each particle and its neighbors. Shape (n_particles, n_max_neighbors, 3)\n \"\"\"\n # coordinates = sampler_state.x0\n # note, we assume the box vectors do not change between building and calculating the neighbor list\n # changes to the box vectors require rebuilding the neighbor list\n\n n_neighbors, padding_mask, dist, r_ij = jax.vmap(\n self._calc_distance_per_particle, in_axes=(0, 0, 0, None)\n )(self.particle_ids, self.neighbor_list, self.neighbor_mask, coordinates)\n # mask = mask.reshape(-1, self.n_max_neighbors)\n return n_neighbors, self.neighbor_list, padding_mask, dist, r_ij\n\n @partial(jax.jit, static_argnums=(0,))\n def _calculate_particle_displacement(self, particle, coordinates, ref_coordinates):\n \"\"\"\n Calculate the displacement of a particle from the reference coordinates.\n If the displacement exceeds the half the skin distance, return True, otherwise return False.\n\n This function is designed to allow it to be jitted and vmapped over particle indices.\n\n Parameters\n ----------\n particle: int\n Particle id\n coordinates: jnp.array\n Array of particle coordinates\n ref_coordinates: jnp.array\n Array of reference particle coordinates\n\n Returns\n -------\n bool\n True if the particle is outside the skin distance, False if it is not.\n \"\"\"\n # calculate the displacement of a particle from the initial coordinates\n\n r_ij, displacement = self.space.displacement(\n coordinates[particle], ref_coordinates[particle]\n )\n\n status = jnp.where(displacement >= self.skin / 2.0, True, False)\n del displacement\n return status\n\n def check(self, coordinates: jnp.array) -> bool:\n \"\"\"\n Check if the neighbor list needs to be rebuilt based on displacement of the particles from the reference coordinates.\n If a particle moves more than 0.5 skin distance, the neighborlist will be rebuilt.\n Will also return True if the size of the coordinates array changes.\n\n Note, this could also accept a user defined criteria for distance, but this is not implemented yet.\n\n Parameters\n ----------\n coordinates: jnp.array\n Array of particle coordinates\n Returns\n -------\n bool\n True if the neighbor list needs to be rebuilt, False if it does not.\n \"\"\"\n\n if self.ref_coordinates.shape[0] != coordinates.shape[0]:\n return True\n\n status = jax.vmap(\n self._calculate_particle_displacement, in_axes=(0, None, None)\n )(self.particle_ids, coordinates, self.ref_coordinates)\n if jnp.any(status):\n del status\n return True\n else:\n del status\n return False" }, { "identifier": "PairList", "path": "chiron/neighbors.py", "snippet": "class PairList(PairsBase):\n \"\"\"\n N^2 pairlist implementation that returns the particle pair ids, displacement vectors, and distances.\n\n Parameters\n ----------\n space: Space\n Class that defines how to calculate the displacement between two points and apply the boundary conditions\n cutoff: float, default = 2.5\n Cutoff distance for the pair list calculation\n Examples\n --------\n >>> from chiron.neighbors import PairList, OrthogonalPeriodicSpace\n >>> from chiron.states import SamplerState\n >>> import jax.numpy as jnp\n >>>\n >>> space = OrthogonalPeriodicSpace()\n >>> pair_list = PairList(space, 
cutoff=2.5)\n >>> sampler_state = SamplerState(x0=jnp.array([[0.0, 0.0, 0.0], [2, 0.0, 0.0], [0.0, 2, 0.0]]),\n >>> box_vectors=jnp.array([[10, 0.0, 0.0], [0.0, 10, 0.0], [0.0, 0.0, 10]]))\n >>> pair_list.build_from_state(sampler_state)\n >>>\n >>> # mask and distances are of shape (n_particles, n_particles-1),\n >>> displacement_vectors of shape (n_particles, n_particles-1, 3)\n >>> # mask, is a bool array that is True if the particle is within the cutoff distance, False if it is not\n >>> # n_pairs is of shape (n_particles) and is per row sum of the mask. The mask ensure we also do not double count pairs\n >>> n_pairs, mask, distances, displacement_vectors = pair_list.calculate(sampler_state.x0)\n \"\"\"\n\n def __init__(\n self,\n space: Space,\n cutoff: unit.Quantity = unit.Quantity(1.2, unit.nanometer),\n ):\n if not isinstance(space, Space):\n raise TypeError(f\"space must be of type Space, found {type(space)}\")\n if not cutoff.unit.is_compatible(unit.angstrom):\n raise ValueError(\n f\"cutoff must be a unit.Quantity with units of distance, cutoff.unit = {cutoff.unit}\"\n )\n\n self.cutoff = cutoff.value_in_unit_system(unit.md_unit_system)\n self.space = space\n\n # set a a simple variable to know if this has at least been built once as opposed to just initialized\n # this does not imply that the neighborlist is up to date\n self.is_built = False\n\n # note, we need to use the partial decorator in order to use the jit decorate\n # so that it knows to ignore the `self` argument\n @partial(jax.jit, static_argnums=(0,))\n def _pairs_and_mask(self, particle_ids: jnp.array):\n \"\"\"\n Jitted function to generate all pairs (excluding self interactions)\n and mask that allows us to remove double-counting of pairs.\n\n Parameters\n ----------\n particle_ids: jnp.array\n Array of particle ids\n\n Returns\n -------\n all_pairs: jnp.array\n Array of all pairs (excluding self interactions), of size (n_particles, n_particles-1)\n reduction_mask: jnp.array\n Bool mask that identifies which pairs to exclude to remove double counting of pairs\n\n \"\"\"\n # for the nsq approach, we consider the distance between a particle and all other particles in the system\n # if we used a cell list the possible_neighbors would be a smaller list, i.e., only those in the neigboring cells\n # we'll just keep with naming syntax for future flexibility\n\n possible_neighbors = particle_ids\n\n particles_j = jnp.broadcast_to(\n possible_neighbors,\n (particle_ids.shape[0], possible_neighbors.shape[0]),\n )\n # reshape the particle_ids\n particles_i = jnp.reshape(particle_ids, (particle_ids.shape[0], 1))\n # create a mask to exclude self interactions and double counting\n temp_mask = particles_i != particles_j\n all_pairs = jax.vmap(self._remove_self_interactions, in_axes=(0, 0))(\n particles_j, temp_mask\n )\n del temp_mask\n all_pairs = jnp.array(all_pairs[0], dtype=jnp.uint32)\n\n reduction_mask = jnp.where(particles_i < all_pairs, True, False)\n\n return all_pairs, reduction_mask\n\n @partial(jax.jit, static_argnums=(0,))\n def _remove_self_interactions(self, particles, temp_mask):\n return jnp.where(\n temp_mask, size=particles.shape[0] - 1, fill_value=particles.shape[0] - 1\n )\n\n def build(\n self,\n coordinates: Union[jnp.array, unit.Quantity],\n box_vectors: Union[jnp.array, unit.Quantity],\n ):\n \"\"\"\n Build the neighborlist from an array of coordinates and box vectors.\n\n Parameters\n ----------\n coordinates: jnp.array\n Shape[n_particles,3] array of particle coordinates\n box_vectors: jnp.array\n 
Shape[3,3] array of box vectors\n\n Returns\n -------\n None\n\n \"\"\"\n\n # set our reference coordinates\n # this will set self.ref_coordinates=coordinates and self.box_vectors\n self._validate_build_inputs(coordinates, box_vectors)\n\n self.n_particles = self.ref_coordinates.shape[0]\n\n # the neighborlist assumes that the box vectors do not change between building and calculating the neighbor list\n # changes to the box vectors require rebuilding the neighbor list\n self.space.box_vectors = self.box_vectors\n\n # store the ids of all the particles\n self.particle_ids = jnp.array(range(0, coordinates.shape[0]), dtype=jnp.uint32)\n\n # calculate which pairs to exclude\n self.all_pairs, self.reduction_mask = self._pairs_and_mask(self.particle_ids)\n\n self.is_built = True\n\n @partial(jax.jit, static_argnums=(0,))\n def _calc_distance_per_particle(\n self, particle1, neighbors, neighbor_mask, coordinates\n ):\n \"\"\"\n Jitted function to calculate the distance between a particle and all possible neighbors\n\n Parameters\n ----------\n particle1: int\n Particle id\n neighbors: jnp.array\n Array of particle ids for the possible particle pairs of particle1\n neighbor_mask: jnp.array\n Mask to exclude double particles to prevent double counting\n coordinates: jnp.array\n X,Y,Z coordinates of all particles, shaped (n_particles, 3)\n\n Returns\n -------\n n_pairs: int\n Number of interacting pairs for the particle\n mask: jnp.array\n Mask to exclude padding particles not within the cutoff particle1.\n If a particle is within the interaction cutoff, the mask is 1, otherwise it is 0\n Array has shape (n_particles, n_particles-1) as it excludes self interactions\n dist: jnp.array\n Array of distances between the particle and all other particles in the system.\n Array has shape (n_particles, n_particles-1) as it excludes self interactions\n r_ij: jnp.array\n Array of displacement vectors between the particle and all other particles in the system.\n Array has shape (n_particles, n_particles-1, 3) as it excludes self interactions\n\n \"\"\"\n # repeat the particle id for each neighbor\n particles1 = jnp.repeat(particle1, neighbors.shape[0])\n\n # calculate the displacement between particle i and all neighbors\n r_ij, dist = self.space.displacement(\n coordinates[particles1], coordinates[neighbors]\n )\n # calculate the mask to determine if the particle is a neighbor\n # this will be done based on the interaction cutoff and using the neighbor_mask to exclude padding\n mask = jnp.where((dist < self.cutoff) & (neighbor_mask), 1, 0)\n\n # calculate the number of pairs\n n_pairs = mask.sum()\n\n return n_pairs, mask, dist, r_ij\n\n def calculate(self, coordinates: jnp.array):\n \"\"\"\n Calculate the neighbor list for the current state\n\n Parameters\n ----------\n coordinates: jnp.array\n Shape[n_particles,3] array of particle coordinates\n\n Returns\n -------\n n_neighbors: jnp.array\n Array of the number of interacting particles (i.e., where dist < cutoff). Shape: (n_particles)\n pairs: jnp.array\n Array of particle ids that were considered for interaction. Shape: (n_particles, n_particles-1)\n padding_mask: jnp.array\n Array used to masks non interaction particle pairs. Shape: (n_particles, n_particles-1)\n dist: jnp.array\n Array of distances between pairs in the system. Shape: (n_particles, n_particles-1)\n r_ij: jnp.array\n Array of displacement vectors between particle pairs. 
Shape: (n_particles, n_particles-1, 3).\n \"\"\"\n if coordinates.shape[0] != self.n_particles:\n raise ValueError(\n f\"Number of particles cannot changes without rebuilding. \"\n f\"Coordinates must have shape ({self.n_particles}, 3), found {coordinates.shape}\"\n )\n\n # coordinates = self.space.wrap(coordinates)\n\n n_neighbors, padding_mask, dist, r_ij = jax.vmap(\n self._calc_distance_per_particle, in_axes=(0, 0, 0, None)\n )(self.particle_ids, self.all_pairs, self.reduction_mask, coordinates)\n\n return n_neighbors, self.all_pairs, padding_mask, dist, r_ij\n\n def check(self, coordinates: jnp.array) -> bool:\n \"\"\"\n Check if we need to reconstruct internal arrays.\n For a simple pairlist this will always return False, unless the number of particles change.\n\n Parameters\n ----------\n coordinates: jnp.array\n Array of particle coordinates\n Returns\n -------\n bool\n True if we need to rebuild the neighbor list, False if we do not.\n \"\"\"\n if coordinates.shape[0] != self.n_particles:\n return True\n else:\n return False" }, { "identifier": "OrthogonalPeriodicSpace", "path": "chiron/neighbors.py", "snippet": "class OrthogonalPeriodicSpace(Space):\n \"\"\"\n Defines the simulation space for an orthogonal periodic system.\n\n \"\"\"\n\n @property\n def box_vectors(self) -> jnp.array:\n return self._box_vectors\n\n @box_vectors.setter\n def box_vectors(self, box_vectors: jnp.array) -> None:\n self._box_vectors = box_vectors\n self._box_lengths = jnp.array(\n [box_vectors[0][0], box_vectors[1][1], box_vectors[2][2]]\n )\n\n @partial(jax.jit, static_argnums=(0,))\n def displacement(\n self, xyz_1: jnp.array, xyz_2: jnp.array\n ) -> Tuple[jnp.array, jnp.array]:\n \"\"\"\n Calculate the periodic distance between two points.\n\n Parameters\n ----------\n xyz_1: jnp.array\n Coordinates of the first point\n xyz_2: jnp.array\n Coordinates of the second point\n\n Returns\n -------\n r_ij: jnp.array\n Displacement vector between the two points\n dist: float\n Distance between the two points\n\n \"\"\"\n # calculate uncorrect r_ij\n r_ij = xyz_1 - xyz_2\n\n # calculated corrected displacement vector\n r_ij = (\n jnp.mod(r_ij + self._box_lengths * 0.5, self._box_lengths)\n - self._box_lengths * 0.5\n )\n # calculate the scalar distance\n dist = jnp.linalg.norm(r_ij, axis=-1)\n\n return r_ij, dist\n\n @partial(jax.jit, static_argnums=(0,))\n def wrap(self, xyz: jnp.array) -> jnp.array:\n \"\"\"\n Wrap the coordinates of the system.\n\n Parameters\n ----------\n xyz: jnp.array\n Coordinates of the system\n\n Returns\n -------\n jnp.array\n Wrapped coordinates of the system\n\n \"\"\"\n xyz = xyz - jnp.floor(xyz / self._box_lengths) * self._box_lengths\n\n return xyz" }, { "identifier": "OrthogonalNonperiodicSpace", "path": "chiron/neighbors.py", "snippet": "class OrthogonalNonperiodicSpace(Space):\n @partial(jax.jit, static_argnums=(0,))\n def displacement(\n self,\n xyz_1: jnp.array,\n xyz_2: jnp.array,\n ) -> Tuple[jnp.array, jnp.array]:\n \"\"\"\n Calculate the periodic distance between two points.\n\n Parameters\n ----------\n xyz_1: jnp.array\n Coordinates of the first point\n xyz_2: jnp.array\n Coordinates of the second point\n\n Returns\n -------\n r_ij: jnp.array\n Displacement vector between the two points\n dist: float\n Distance between the two points\n\n \"\"\"\n # calculate uncorrect r_ij\n r_ij = xyz_1 - xyz_2\n\n # calculate the scalar distance\n dist = jnp.linalg.norm(r_ij, axis=-1)\n\n return r_ij, dist\n\n @partial(jax.jit, static_argnums=(0,))\n def wrap(self, xyz: 
jnp.array) -> jnp.array:\n \"\"\"\n Wrap the coordinates of the system.\n For the Non-periodic system, this does not alter the coordinates\n\n Parameters\n ----------\n xyz: jnp.array\n Coordinates of the system\n\n Returns\n -------\n jnp.array\n Wrapped coordinates of the system\n\n \"\"\"\n return xyz" }, { "identifier": "SamplerState", "path": "chiron/states.py", "snippet": "class SamplerState:\n \"\"\"\n Represents the state of the system that is updated during integration.\n\n Parameters\n ----------\n x0 : unit.Quantity\n The current positions of the particles in the simulation.\n velocities : unit.Quantity, optional\n The velocities of the particles in the simulation.\n box_vectors : unit.Quantity, optional\n The box vectors defining the simulation's periodic boundary conditions.\n\n \"\"\"\n\n def __init__(\n self,\n x0: unit.Quantity,\n velocities: Optional[unit.Quantity] = None,\n box_vectors: Optional[unit.Quantity] = None,\n ) -> None:\n # NOTE: all units are internally in the openMM units system as documented here:\n # http://docs.openmm.org/latest/userguide/theory/01_introduction.html#units\n if not isinstance(x0, unit.Quantity):\n raise TypeError(f\"x0 must be a unit.Quantity, got {type(x0)} instead.\")\n if velocities is not None and not isinstance(velocities, unit.Quantity):\n raise TypeError(\n f\"velocities must be a unit.Quantity, got {type(velocities)} instead.\"\n )\n if box_vectors is not None and not isinstance(box_vectors, unit.Quantity):\n if isinstance(box_vectors, List):\n try:\n box_vectors = self._convert_from_openmm_box(box_vectors)\n except:\n raise TypeError(f\"Unable to parse box_vectors {box_vectors}.\")\n else:\n raise TypeError(\n f\"box_vectors must be a unit.Quantity or openMM box, got {type(box_vectors)} instead.\"\n )\n if not x0.unit.is_compatible(unit.nanometer):\n raise ValueError(f\"x0 must have units of distance, got {x0.unit} instead.\")\n if velocities is not None and not velocities.unit.is_compatible(\n unit.nanometer / unit.picosecond\n ):\n raise ValueError(\n f\"velocities must have units of distance/time, got {velocities.unit} instead.\"\n )\n if box_vectors is not None and not box_vectors.unit.is_compatible(\n unit.nanometer\n ):\n raise ValueError(\n f\"box_vectors must have units of distance, got {box_vectors.unit} instead.\"\n )\n if box_vectors is not None and box_vectors.shape != (3, 3):\n raise ValueError(\n f\"box_vectors must be a 3x3 array, got {box_vectors.shape} instead.\"\n )\n\n self._x0 = x0\n self._velocities = velocities\n self._box_vectors = box_vectors\n self._distance_unit = unit.nanometer\n\n @property\n def x0(self) -> jnp.array:\n return self._convert_to_jnp(self._x0)\n\n @property\n def velocities(self) -> jnp.array:\n if self._velocities is None:\n return None\n return self._convert_to_jnp(self._velocities)\n\n @property\n def box_vectors(self) -> jnp.array:\n if self._box_vectors is None:\n return None\n return self._convert_to_jnp(self._box_vectors)\n\n @x0.setter\n def x0(self, x0: Union[jnp.array, unit.Quantity]) -> None:\n if isinstance(x0, unit.Quantity):\n self._x0 = x0\n else:\n self._x0 = unit.Quantity(x0, self._distance_unit)\n\n @property\n def distance_unit(self) -> unit.Unit:\n return self._distance_unit\n\n def _convert_to_jnp(self, array: unit.Quantity) -> jnp.array:\n \"\"\"\n Convert the sampler state to jnp arrays.\n \"\"\"\n import jax.numpy as jnp\n\n array_ = array.value_in_unit_system(unit.md_unit_system)\n return jnp.array(array_)\n\n def _convert_from_openmm_box(self, openmm_box_vectors: 
List) -> unit.Quantity:\n box_vec = []\n for i in range(0, 3):\n layer = []\n for j in range(0, 3):\n layer.append(\n openmm_box_vectors[i][j].value_in_unit(openmm_box_vectors[0].unit)\n )\n box_vec.append(layer)\n return unit.Quantity(jnp.array(box_vec), openmm_box_vectors[0].unit)" } ]
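The `OrthogonalPeriodicSpace.displacement` and `wrap` methods shown in the context above reduce to two lines of minimum-image arithmetic. Below is a minimal standalone sketch of that arithmetic in plain jax.numpy, not the chiron API itself; the box length and points mirror the values asserted in the test code later in this record.

import jax.numpy as jnp

box_lengths = jnp.array([10.0, 10.0, 10.0])  # orthogonal box edge lengths (nm)

def displacement(xyz_1, xyz_2):
    # raw displacement, then folded into [-L/2, L/2) per axis (minimum image)
    r_ij = xyz_1 - xyz_2
    r_ij = jnp.mod(r_ij + box_lengths * 0.5, box_lengths) - box_lengths * 0.5
    return r_ij, jnp.linalg.norm(r_ij, axis=-1)

def wrap(xyz):
    # shift coordinates back into the primary box [0, L)
    return xyz - jnp.floor(xyz / box_lengths) * box_lengths

p1 = jnp.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
p2 = jnp.array([[1.0, 0.0, 0.0], [6.0, 0.0, 0.0]])
r_ij, dist = displacement(p1, p2)
print(dist)                               # [1. 4.]: the 6 nm gap maps to 4 nm via the periodic image
print(wrap(jnp.array([11.0, 0.0, 0.0])))  # [1. 0. 0.]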
import jax.numpy as jnp import pytest from chiron.neighbors import ( NeighborListNsqrd, PairList, OrthogonalPeriodicSpace, OrthogonalNonperiodicSpace, ) from chiron.states import SamplerState from openmm import unit
8,923
def test_orthogonal_periodic_displacement(): # test that the incorrect box shapes throw an exception with pytest.raises(ValueError): space = OrthogonalPeriodicSpace(jnp.array([10.0, 10.0, 10.0])) # test that incorrect units throw an exception with pytest.raises(ValueError): space = OrthogonalPeriodicSpace( unit.Quantity( jnp.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]), unit.radians, ) ) space = OrthogonalPeriodicSpace( jnp.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]) ) # test that the box vectors are set correctly assert jnp.all( space.box_vectors == jnp.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]) ) # test that the box lengths for an orthogonal box are set correctly assert jnp.all(space._box_lengths == jnp.array([10.0, 10.0, 10.0])) # test calculation of the displacement_vector and distance between two points p1 = jnp.array([[0, 0, 0], [0, 0, 0]]) p2 = jnp.array([[1, 0, 0], [6, 0, 0]]) r_ij, distance = space.displacement(p1, p2) assert jnp.all(r_ij == jnp.array([[-1.0, 0.0, 0.0], [4.0, 0.0, 0.0]])) assert jnp.all(distance == jnp.array([1, 4])) # test that the periodic wrapping works as expected wrapped_x = space.wrap(jnp.array([11, 0, 0])) assert jnp.all(wrapped_x == jnp.array([1, 0, 0])) wrapped_x = space.wrap(jnp.array([-1, 0, 0])) assert jnp.all(wrapped_x == jnp.array([9, 0, 0])) wrapped_x = space.wrap(jnp.array([5, 0, 0])) assert jnp.all(wrapped_x == jnp.array([5, 0, 0])) wrapped_x = space.wrap(jnp.array([5, 12, -1])) assert jnp.all(wrapped_x == jnp.array([5, 2, 9])) # test the setter for the box vectors space.box_vectors = jnp.array( [[10.0, 0.0, 0.0], [0.0, 20.0, 0.0], [0.0, 0.0, 30.0]] ) assert jnp.all( space._box_vectors == jnp.array([[10.0, 0.0, 0.0], [0.0, 20.0, 0.0], [0.0, 0.0, 30.0]]) ) assert jnp.all(space._box_lengths == jnp.array([10.0, 20.0, 30.0])) def test_orthogonal_nonperiodic_displacement():
def test_orthogonal_periodic_displacement(): # test that the incorrect box shapes throw an exception with pytest.raises(ValueError): space = OrthogonalPeriodicSpace(jnp.array([10.0, 10.0, 10.0])) # test that incorrect units throw an exception with pytest.raises(ValueError): space = OrthogonalPeriodicSpace( unit.Quantity( jnp.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]), unit.radians, ) ) space = OrthogonalPeriodicSpace( jnp.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]) ) # test that the box vectors are set correctly assert jnp.all( space.box_vectors == jnp.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]) ) # test that the box lengths for an orthogonal box are set correctly assert jnp.all(space._box_lengths == jnp.array([10.0, 10.0, 10.0])) # test calculation of the displacement_vector and distance between two points p1 = jnp.array([[0, 0, 0], [0, 0, 0]]) p2 = jnp.array([[1, 0, 0], [6, 0, 0]]) r_ij, distance = space.displacement(p1, p2) assert jnp.all(r_ij == jnp.array([[-1.0, 0.0, 0.0], [4.0, 0.0, 0.0]])) assert jnp.all(distance == jnp.array([1, 4])) # test that the periodic wrapping works as expected wrapped_x = space.wrap(jnp.array([11, 0, 0])) assert jnp.all(wrapped_x == jnp.array([1, 0, 0])) wrapped_x = space.wrap(jnp.array([-1, 0, 0])) assert jnp.all(wrapped_x == jnp.array([9, 0, 0])) wrapped_x = space.wrap(jnp.array([5, 0, 0])) assert jnp.all(wrapped_x == jnp.array([5, 0, 0])) wrapped_x = space.wrap(jnp.array([5, 12, -1])) assert jnp.all(wrapped_x == jnp.array([5, 2, 9])) # test the setter for the box vectors space.box_vectors = jnp.array( [[10.0, 0.0, 0.0], [0.0, 20.0, 0.0], [0.0, 0.0, 30.0]] ) assert jnp.all( space._box_vectors == jnp.array([[10.0, 0.0, 0.0], [0.0, 20.0, 0.0], [0.0, 0.0, 30.0]]) ) assert jnp.all(space._box_lengths == jnp.array([10.0, 20.0, 30.0])) def test_orthogonal_nonperiodic_displacement():
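The `PairList` class from the context snippets can be driven end to end with only `build` and `calculate`. The sketch below mirrors the usage example in its own docstring, assuming the chiron package from this record is importable; the 2.5 nm cutoff is illustrative and is passed as a unit.Quantity, which the constructor requires.

import jax.numpy as jnp
from openmm import unit
from chiron.neighbors import PairList, OrthogonalPeriodicSpace

space = OrthogonalPeriodicSpace()
pair_list = PairList(space, cutoff=unit.Quantity(2.5, unit.nanometer))

coords = jnp.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0], [0.0, 2.0, 0.0]])
box = jnp.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]])
pair_list.build(coords, box)  # stores particle ids, all candidate pairs, and the double-counting mask

# n_pairs counts unique interacting pairs attributed to each particle (double counting removed);
# mask zeroes out candidate pairs that fall outside the cutoff
n_pairs, pairs, mask, dist, r_ij = pair_list.calculate(coords)
print(n_pairs.shape, dist.shape, r_ij.shape)  # (3,) (3, 2) (3, 2, 3)

Internally `calculate` vmaps `_calc_distance_per_particle` over all particle ids, which is why the returned arrays share the leading n_particles dimension.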
space = OrthogonalNonperiodicSpace(
3
2023-11-07 18:17:43+00:00
12k
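The rebuild criterion in `NeighborListNsqrd.check` from the record above is the standard Verlet-skin rule: the list is rebuilt as soon as any particle has drifted at least half the skin from its reference position. A minimal standalone sketch of that rule, with an illustrative skin value and a plain Euclidean displacement in place of the periodic `Space.displacement`:

import jax
import jax.numpy as jnp

skin = 0.4  # nm; the neighbor list is collected out to cutoff + skin

def moved_too_far(particle, coords, ref_coords):
    # displacement of one particle from where the list was last built
    d = jnp.linalg.norm(coords[particle] - ref_coords[particle])
    return d >= skin / 2.0

ref = jnp.zeros((4, 3))
new = ref.at[2].set(jnp.array([0.25, 0.0, 0.0]))  # particle 2 drifted 0.25 nm
status = jax.vmap(moved_too_far, in_axes=(0, None, None))(jnp.arange(4), new, ref)
print(bool(jnp.any(status)))  # True: 0.25 >= skin / 2 == 0.2, so rebuild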
Rishit-dagli/Astroformer
pytorch-image-models/timm/models/vision_transformer.py
[ { "identifier": "build_model_with_cfg", "path": "pytorch-image-models/timm/models/_builder.py", "snippet": "def build_model_with_cfg(\n model_cls: Callable,\n variant: str,\n pretrained: bool,\n pretrained_cfg: Optional[Dict] = None,\n pretrained_cfg_overlay: Optional[Dict] = None,\n model_cfg: Optional[Any] = None,\n feature_cfg: Optional[Dict] = None,\n pretrained_strict: bool = True,\n pretrained_filter_fn: Optional[Callable] = None,\n kwargs_filter: Optional[Tuple[str]] = None,\n **kwargs,\n):\n \"\"\" Build model with specified default_cfg and optional model_cfg\n\n This helper fn aids in the construction of a model including:\n * handling default_cfg and associated pretrained weight loading\n * passing through optional model_cfg for models with config based arch spec\n * features_only model adaptation\n * pruning config / model adaptation\n\n Args:\n model_cls (nn.Module): model class\n variant (str): model variant name\n pretrained (bool): load pretrained weights\n pretrained_cfg (dict): model's pretrained weight/task config\n model_cfg (Optional[Dict]): model's architecture config\n feature_cfg (Optional[Dict]: feature extraction adapter config\n pretrained_strict (bool): load pretrained weights strictly\n pretrained_filter_fn (Optional[Callable]): filter callable for pretrained weights\n kwargs_filter (Optional[Tuple]): kwargs to filter before passing to model\n **kwargs: model args passed through to model __init__\n \"\"\"\n pruned = kwargs.pop('pruned', False)\n features = False\n feature_cfg = feature_cfg or {}\n\n # resolve and update model pretrained config and model kwargs\n pretrained_cfg = resolve_pretrained_cfg(\n variant,\n pretrained_cfg=pretrained_cfg,\n pretrained_cfg_overlay=pretrained_cfg_overlay\n )\n\n # FIXME converting back to dict, PretrainedCfg use should be propagated further, but not into model\n pretrained_cfg = pretrained_cfg.to_dict()\n\n _update_default_kwargs(pretrained_cfg, kwargs, kwargs_filter)\n\n # Setup for feature extraction wrapper done at end of this fn\n if kwargs.pop('features_only', False):\n features = True\n feature_cfg.setdefault('out_indices', (0, 1, 2, 3, 4))\n if 'out_indices' in kwargs:\n feature_cfg['out_indices'] = kwargs.pop('out_indices')\n\n # Instantiate the model\n if model_cfg is None:\n model = model_cls(**kwargs)\n else:\n model = model_cls(cfg=model_cfg, **kwargs)\n model.pretrained_cfg = pretrained_cfg\n model.default_cfg = model.pretrained_cfg # alias for backwards compat\n\n if pruned:\n model = adapt_model_from_file(model, variant)\n\n # For classification models, check class attr, then kwargs, then default to 1k, otherwise 0 for feats\n num_classes_pretrained = 0 if features else getattr(model, 'num_classes', kwargs.get('num_classes', 1000))\n if pretrained:\n load_pretrained(\n model,\n pretrained_cfg=pretrained_cfg,\n num_classes=num_classes_pretrained,\n in_chans=kwargs.get('in_chans', 3),\n filter_fn=pretrained_filter_fn,\n strict=pretrained_strict,\n )\n\n # Wrap the model in a feature extraction module if enabled\n if features:\n feature_cls = FeatureListNet\n output_fmt = getattr(model, 'output_fmt', None)\n if output_fmt is not None:\n feature_cfg.setdefault('output_fmt', output_fmt)\n if 'feature_cls' in feature_cfg:\n feature_cls = feature_cfg.pop('feature_cls')\n if isinstance(feature_cls, str):\n feature_cls = feature_cls.lower()\n if 'hook' in feature_cls:\n feature_cls = FeatureHookNet\n elif feature_cls == 'fx':\n feature_cls = FeatureGraphNet\n else:\n assert False, f'Unknown feature class 
{feature_cls}'\n model = feature_cls(model, **feature_cfg)\n model.pretrained_cfg = pretrained_cfg_for_features(pretrained_cfg) # add back pretrained cfg\n model.default_cfg = model.pretrained_cfg # alias for rename backwards compat (default_cfg -> pretrained_cfg)\n\n return model" }, { "identifier": "named_apply", "path": "pytorch-image-models/timm/models/_manipulate.py", "snippet": "def named_apply(\n fn: Callable,\n module: nn.Module, name='',\n depth_first: bool = True,\n include_root: bool = False,\n) -> nn.Module:\n if not depth_first and include_root:\n fn(module=module, name=name)\n for child_name, child_module in module.named_children():\n child_name = '.'.join((name, child_name)) if name else child_name\n named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True)\n if depth_first and include_root:\n fn(module=module, name=name)\n return module" }, { "identifier": "checkpoint_seq", "path": "pytorch-image-models/timm/models/_manipulate.py", "snippet": "def checkpoint_seq(\n functions,\n x,\n every=1,\n flatten=False,\n skip_last=False,\n preserve_rng_state=True\n):\n r\"\"\"A helper function for checkpointing sequential models.\n\n Sequential models execute a list of modules/functions in order\n (sequentially). Therefore, we can divide such a sequence into segments\n and checkpoint each segment. All segments except run in :func:`torch.no_grad`\n manner, i.e., not storing the intermediate activations. The inputs of each\n checkpointed segment will be saved for re-running the segment in the backward pass.\n\n See :func:`~torch.utils.checkpoint.checkpoint` on how checkpointing works.\n\n .. warning::\n Checkpointing currently only supports :func:`torch.autograd.backward`\n and only if its `inputs` argument is not passed. :func:`torch.autograd.grad`\n is not supported.\n\n .. 
warning:\n At least one of the inputs needs to have :code:`requires_grad=True` if\n grads are needed for model inputs, otherwise the checkpointed part of the\n model won't have gradients.\n\n Args:\n functions: A :class:`torch.nn.Sequential` or the list of modules or functions to run sequentially.\n x: A Tensor that is input to :attr:`functions`\n every: checkpoint every-n functions (default: 1)\n flatten (bool): flatten nn.Sequential of nn.Sequentials\n skip_last (bool): skip checkpointing the last function in the sequence if True\n preserve_rng_state (bool, optional, default=True): Omit stashing and restoring\n the RNG state during each checkpoint.\n\n Returns:\n Output of running :attr:`functions` sequentially on :attr:`*inputs`\n\n Example:\n >>> model = nn.Sequential(...)\n >>> input_var = checkpoint_seq(model, input_var, every=2)\n \"\"\"\n def run_function(start, end, functions):\n def forward(_x):\n for j in range(start, end + 1):\n _x = functions[j](_x)\n return _x\n return forward\n\n if isinstance(functions, torch.nn.Sequential):\n functions = functions.children()\n if flatten:\n functions = chain.from_iterable(functions)\n if not isinstance(functions, (tuple, list)):\n functions = tuple(functions)\n\n num_checkpointed = len(functions)\n if skip_last:\n num_checkpointed -= 1\n end = -1\n for start in range(0, num_checkpointed, every):\n end = min(start + every - 1, num_checkpointed - 1)\n x = checkpoint(run_function(start, end, functions), x, preserve_rng_state=preserve_rng_state)\n if skip_last:\n return run_function(end + 1, len(functions) - 1, functions)(x)\n return x" }, { "identifier": "adapt_input_conv", "path": "pytorch-image-models/timm/models/_manipulate.py", "snippet": "def adapt_input_conv(in_chans, conv_weight):\n conv_type = conv_weight.dtype\n conv_weight = conv_weight.float() # Some weights are in torch.half, ensure it's float for sum on CPU\n O, I, J, K = conv_weight.shape\n if in_chans == 1:\n if I > 3:\n assert conv_weight.shape[1] % 3 == 0\n # For models with space2depth stems\n conv_weight = conv_weight.reshape(O, I // 3, 3, J, K)\n conv_weight = conv_weight.sum(dim=2, keepdim=False)\n else:\n conv_weight = conv_weight.sum(dim=1, keepdim=True)\n elif in_chans != 3:\n if I != 3:\n raise NotImplementedError('Weight format not supported by conversion.')\n else:\n # NOTE this strategy should be better than random init, but there could be other combinations of\n # the original RGB input layer weights that'd work better for specific cases.\n repeat = int(math.ceil(in_chans / 3))\n conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :]\n conv_weight *= (3 / float(in_chans))\n conv_weight = conv_weight.to(conv_type)\n return conv_weight" }, { "identifier": "generate_default_cfgs", "path": "pytorch-image-models/timm/models/_registry.py", "snippet": "def generate_default_cfgs(cfgs: Dict[str, Union[Dict[str, Any], PretrainedCfg]]):\n out = defaultdict(DefaultCfg)\n default_set = set() # no tag and tags ending with * are prioritized as default\n\n for k, v in cfgs.items():\n if isinstance(v, dict):\n v = PretrainedCfg(**v)\n has_weights = v.has_weights\n\n model, tag = split_model_name_tag(k)\n is_default_set = model in default_set\n priority = (has_weights and not tag) or (tag.endswith('*') and not is_default_set)\n tag = tag.strip('*')\n\n default_cfg = out[model]\n\n if priority:\n default_cfg.tags.appendleft(tag)\n default_set.add(model)\n elif has_weights and not default_cfg.is_pretrained:\n default_cfg.tags.appendleft(tag)\n else:\n 
default_cfg.tags.append(tag)\n\n if has_weights:\n default_cfg.is_pretrained = True\n\n default_cfg.cfgs[tag] = v\n\n return out" }, { "identifier": "register_model", "path": "pytorch-image-models/timm/models/_registry.py", "snippet": "def register_model(fn: Callable[..., Any]) -> Callable[..., Any]:\n # lookup containing module\n mod = sys.modules[fn.__module__]\n module_name_split = fn.__module__.split('.')\n module_name = module_name_split[-1] if len(module_name_split) else ''\n\n # add model to __all__ in module\n model_name = fn.__name__\n if hasattr(mod, '__all__'):\n mod.__all__.append(model_name)\n else:\n mod.__all__ = [model_name] # type: ignore\n\n # add entries to registry dict/sets\n _model_entrypoints[model_name] = fn\n _model_to_module[model_name] = module_name\n _module_to_models[module_name].add(model_name)\n if hasattr(mod, 'default_cfgs') and model_name in mod.default_cfgs:\n # this will catch all models that have entrypoint matching cfg key, but miss any aliasing\n # entrypoints or non-matching combos\n default_cfg = mod.default_cfgs[model_name]\n if not isinstance(default_cfg, DefaultCfg):\n # new style default cfg dataclass w/ multiple entries per model-arch\n assert isinstance(default_cfg, dict)\n # old style cfg dict per model-arch\n pretrained_cfg = PretrainedCfg(**default_cfg)\n default_cfg = DefaultCfg(tags=deque(['']), cfgs={'': pretrained_cfg})\n\n for tag_idx, tag in enumerate(default_cfg.tags):\n is_default = tag_idx == 0\n pretrained_cfg = default_cfg.cfgs[tag]\n model_name_tag = '.'.join([model_name, tag]) if tag else model_name\n replace_items = dict(architecture=model_name, tag=tag if tag else None)\n if pretrained_cfg.hf_hub_id and pretrained_cfg.hf_hub_id == 'timm/':\n # auto-complete hub name w/ architecture.tag\n replace_items['hf_hub_id'] = pretrained_cfg.hf_hub_id + model_name_tag\n pretrained_cfg = replace(pretrained_cfg, **replace_items)\n\n if is_default:\n _model_pretrained_cfgs[model_name] = pretrained_cfg\n if pretrained_cfg.has_weights:\n # add tagless entry if it's default and has weights\n _model_has_pretrained.add(model_name)\n\n if tag:\n _model_pretrained_cfgs[model_name_tag] = pretrained_cfg\n if pretrained_cfg.has_weights:\n # add model w/ tag if tag is valid\n _model_has_pretrained.add(model_name_tag)\n _model_with_tags[model_name].append(model_name_tag)\n else:\n _model_with_tags[model_name].append(model_name) # has empty tag (to slowly remove these instances)\n\n _model_default_cfgs[model_name] = default_cfg\n\n return fn" }, { "identifier": "register_model_deprecations", "path": "pytorch-image-models/timm/models/_registry.py", "snippet": "def register_model_deprecations(module_name: str, deprecation_map: Dict[str, Optional[str]]):\n mod = sys.modules[module_name]\n module_name_split = module_name.split('.')\n module_name = module_name_split[-1] if len(module_name_split) else ''\n\n for deprecated, current in deprecation_map.items():\n if hasattr(mod, '__all__'):\n mod.__all__.append(deprecated)\n current_fn = None\n current_tag = ''\n if current:\n current_name, current_tag = split_model_name_tag(current)\n current_fn = getattr(mod, current_name)\n deprecated_entrypoint_fn = _deprecated_model_shim(deprecated, current_fn, current_tag)\n setattr(mod, deprecated, deprecated_entrypoint_fn)\n _model_entrypoints[deprecated] = deprecated_entrypoint_fn\n _model_to_module[deprecated] = module_name\n _module_to_models[module_name].add(deprecated)\n _deprecated_models[deprecated] = current\n 
_module_to_deprecated_models[module_name][deprecated] = current" } ]
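The `_registry` and `_builder` helpers collected above are meant to be combined in a fixed pattern inside each timm model file: a module-level `default_cfgs = generate_default_cfgs({...})` plus an `@register_model` entrypoint that constructs the network through `build_model_with_cfg`. The sketch below shows that pattern with an invented variant name, tag, and config numbers, assuming a recent timm installation.

from timm.models import build_model_with_cfg, generate_default_cfgs, register_model
from timm.models.vision_transformer import VisionTransformer

def _cfg(url='', **kwargs):
    # minimal pretrained-cfg dict in the style of the _cfg entries in this file
    return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), **kwargs}

default_cfgs = generate_default_cfgs({
    'vit_demo_patch16_224.untrained': _cfg(),  # hypothetical variant + tag
})

@register_model
def vit_demo_patch16_224(pretrained=False, **kwargs):
    # registered under the function name; the variant string is matched against
    # default_cfgs and the model is built (and optionally loaded) by build_model_with_cfg
    model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3)
    return build_model_with_cfg(
        VisionTransformer, 'vit_demo_patch16_224', pretrained,
        **dict(model_args, **kwargs),
    )

Once such a module is imported, `timm.create_model('vit_demo_patch16_224')` resolves the entrypoint through the same registry.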
import logging import math import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint import numpy as np import re from collections import OrderedDict from functools import partial from typing import Callable, List, Optional, Sequence, Tuple, Type, Union from torch.jit import Final from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD, \ OPENAI_CLIP_MEAN, OPENAI_CLIP_STD from timm.layers import PatchEmbed, Mlp, DropPath, AttentionPoolLatent, RmsNorm, PatchDropout, SwiGLUPacked, \ trunc_normal_, lecun_normal_, resample_patch_embed, resample_abs_pos_embed, use_fused_attn, \ get_act_layer, get_norm_layer, LayerType from ._builder import build_model_with_cfg from ._manipulate import named_apply, checkpoint_seq, adapt_input_conv from ._registry import generate_default_cfgs, register_model, register_model_deprecations from timm.layers import get_act_layer
8,679
'vit_base_patch16_224.mae': _cfg( url='https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_base.pth', hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_large_patch16_224.mae': _cfg( url='https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_large.pth', hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_huge_patch14_224.mae': _cfg( url='https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_huge.pth', hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_huge_patch14_gap_224.in1k_ijepa': _cfg( url='https://dl.fbaipublicfiles.com/ijepa/IN1K-vit.h.14-300e.pth.tar', # hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_huge_patch14_gap_224.in22k_ijepa': _cfg( url='https://dl.fbaipublicfiles.com/ijepa/IN22K-vit.h.14-900e.pth.tar', # hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_huge_patch16_gap_448.in1k_ijepa': _cfg( url='https://dl.fbaipublicfiles.com/ijepa/IN1K-vit.h.16-448px-300e.pth.tar', # hf_hub_id='timm/', license='cc-by-nc-4.0', input_size=(3, 448, 448), crop_pct=1.0, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_giant_patch16_gap_224.in22k_ijepa': _cfg( url='https://dl.fbaipublicfiles.com/ijepa/IN22K-vit.g.16-600e.pth.tar', # hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_base_patch16_siglip_224.webli': _cfg( hf_hub_id='timm/ViT-B-16-SigLIP', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=0), 'vit_base_patch16_siglip_256.webli': _cfg( hf_hub_id='timm/ViT-B-16-SigLIP-256', hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 256, 256), num_classes=0), 'vit_base_patch16_siglip_384.webli': _cfg( hf_hub_id='timm/ViT-B-16-SigLIP-384', hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 384, 384), num_classes=0), 'vit_base_patch16_siglip_512.webli': _cfg( hf_hub_id='timm/ViT-B-16-SigLIP-512', hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 512, 512), num_classes=0), 'vit_large_patch16_siglip_256.webli': _cfg( hf_hub_id='timm/ViT-L-16-SigLIP-256', hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 256, 256), num_classes=0), 'vit_large_patch16_siglip_384.webli': _cfg( hf_hub_id='timm/ViT-L-16-SigLIP-384', hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 384, 384), num_classes=0), 'vit_so400m_patch14_siglip_224.webli': _cfg( hf_hub_id='timm/ViT-SO400M-14-SigLIP', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=0), 'vit_so400m_patch14_siglip_384.webli': _cfg( hf_hub_id='timm/ViT-SO400M-14-SigLIP-384', hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 384, 384), num_classes=0), 'vit_medium_patch16_reg4_256': _cfg( input_size=(3, 256, 256)), 'vit_medium_patch16_reg4_gap_256': _cfg( input_size=(3, 256, 256)), 'vit_base_patch16_reg8_gap_256': _cfg(input_size=(3, 256, 256)), } _quick_gelu_cfgs = [ 'vit_large_patch14_clip_224.dfn2b', 'vit_huge_patch14_clip_224.dfn5b', 'vit_huge_patch14_clip_378.dfn5b', 'vit_base_patch32_clip_224.metaclip_2pt5b', 'vit_base_patch16_clip_224.metaclip_2pt5b', 'vit_large_patch14_clip_224.metaclip_2pt5b', 'vit_huge_patch14_clip_224.metaclip_2pt5b', 'vit_base_patch32_clip_224.openai', 'vit_base_patch16_clip_224.openai', 
'vit_large_patch14_clip_224.openai', 'vit_large_patch14_clip_336.openai', ] default_cfgs.update({ n.replace('_clip_', '_clip_quickgelu_'): default_cfgs[n] for n in _quick_gelu_cfgs }) default_cfgs = generate_default_cfgs(default_cfgs) def _create_vision_transformer(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') if 'flexi' in variant: # FIXME Google FlexiViT pretrained models have a strong preference for bilinear patch / embed # interpolation, other pretrained models resize better w/ anti-aliased bicubic interpolation. _filter_fn = partial(checkpoint_filter_fn, interpolation='bilinear', antialias=False) else: _filter_fn = checkpoint_filter_fn # FIXME attn pool (currently only in siglip) params removed if pool disabled, is there a better soln? strict = True if 'siglip' in variant and kwargs.get('global_pool', None) != 'map': strict = False
""" Vision Transformer (ViT) in PyTorch A PyTorch implement of Vision Transformers as described in: 'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' - https://arxiv.org/abs/2010.11929 `How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers` - https://arxiv.org/abs/2106.10270 `FlexiViT: One Model for All Patch Sizes` - https://arxiv.org/abs/2212.08013 The official jax code is released and available at * https://github.com/google-research/vision_transformer * https://github.com/google-research/big_vision Acknowledgments: * The paper authors for releasing code and weights, thanks! * I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch * Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT * Bert reference code checks against Huggingface Transformers and Tensorflow Bert Hacked together by / Copyright 2020, Ross Wightman """ __all__ = ['VisionTransformer'] # model_registry will add each entrypoint fn to this _logger = logging.getLogger(__name__) class Attention(nn.Module): fused_attn: Final[bool] def __init__( self, dim, num_heads=8, qkv_bias=False, qk_norm=False, attn_drop=0., proj_drop=0., norm_layer=nn.LayerNorm, ): super().__init__() assert dim % num_heads == 0, 'dim should be divisible by num_heads' self.num_heads = num_heads self.head_dim = dim // num_heads self.scale = self.head_dim ** -0.5 self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) q, k = self.q_norm(q), self.k_norm(k) if self.fused_attn: x = F.scaled_dot_product_attention( q, k, v, dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class LayerScale(nn.Module): def __init__(self, dim, init_values=1e-5, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): return x.mul_(self.gamma) if self.inplace else x * self.gamma class Block(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_norm=False, proj_drop=0., attn_drop=0., init_values=None, drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, mlp_layer=Mlp, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_norm=qk_norm, attn_drop=attn_drop, proj_drop=proj_drop, norm_layer=norm_layer, ) self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = mlp_layer( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, ) self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def forward(self, x): x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x)))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x class ResPostBlock(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_norm=False, proj_drop=0., attn_drop=0., init_values=None, drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, mlp_layer=Mlp, ): super().__init__() self.init_values = init_values self.attn = Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_norm=qk_norm, attn_drop=attn_drop, proj_drop=proj_drop, norm_layer=norm_layer, ) self.norm1 = norm_layer(dim) self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.mlp = mlp_layer( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, ) self.norm2 = norm_layer(dim) self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.init_weights() def init_weights(self): # NOTE this init overrides that base model init with specific changes for the block type if self.init_values is not None: nn.init.constant_(self.norm1.weight, self.init_values) nn.init.constant_(self.norm2.weight, self.init_values) def forward(self, x): x = x + self.drop_path1(self.norm1(self.attn(x))) x = x + self.drop_path2(self.norm2(self.mlp(x))) return x class ParallelScalingBlock(nn.Module): """ Parallel ViT block (MLP & Attention in parallel) Based on: 'Scaling Vision Transformers to 22 Billion Parameters` - https://arxiv.org/abs/2302.05442 """ fused_attn: Final[bool] def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_norm=False, proj_drop=0., attn_drop=0., init_values=None, drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, mlp_layer=None, # NOTE: not used ): super().__init__() assert dim % num_heads == 0, 'dim should be divisible by num_heads' self.num_heads = num_heads self.head_dim = dim // num_heads self.scale = self.head_dim ** -0.5 self.fused_attn = use_fused_attn() mlp_hidden_dim = int(mlp_ratio * dim) in_proj_out_dim = mlp_hidden_dim + 3 * dim self.in_norm = norm_layer(dim) self.in_proj = nn.Linear(dim, in_proj_out_dim, bias=qkv_bias) self.in_split = [mlp_hidden_dim] + [dim] * 3 if qkv_bias: self.register_buffer('qkv_bias', None) self.register_parameter('mlp_bias', None) else: self.register_buffer('qkv_bias', torch.zeros(3 * dim), persistent=False) self.mlp_bias = nn.Parameter(torch.zeros(mlp_hidden_dim)) self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.attn_drop = nn.Dropout(attn_drop) self.attn_out_proj = nn.Linear(dim, dim) self.mlp_drop = nn.Dropout(proj_drop) self.mlp_act = act_layer() self.mlp_out_proj = nn.Linear(mlp_hidden_dim, dim) self.ls = LayerScale(dim, init_values=init_values) if init_values is not None else nn.Identity() self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x): B, N, C = x.shape # Combined MLP fc1 & qkv projections y = self.in_norm(x) if self.mlp_bias is not None: # Concat constant zero-bias for qkv w/ trainable mlp_bias. 
# Appears faster than adding to x_mlp separately y = F.linear(y, self.in_proj.weight, torch.cat((self.qkv_bias, self.mlp_bias))) else: y = self.in_proj(y) x_mlp, q, k, v = torch.split(y, self.in_split, dim=-1) # Dot product attention w/ qk norm q = self.q_norm(q.view(B, N, self.num_heads, self.head_dim)).transpose(1, 2) k = self.k_norm(k.view(B, N, self.num_heads, self.head_dim)).transpose(1, 2) v = v.view(B, N, self.num_heads, self.head_dim).transpose(1, 2) if self.fused_attn: x_attn = F.scaled_dot_product_attention( q, k, v, dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x_attn = attn @ v x_attn = x_attn.transpose(1, 2).reshape(B, N, C) x_attn = self.attn_out_proj(x_attn) # MLP activation, dropout, fc2 x_mlp = self.mlp_act(x_mlp) x_mlp = self.mlp_drop(x_mlp) x_mlp = self.mlp_out_proj(x_mlp) # Add residual w/ drop path & layer scale applied y = self.drop_path(self.ls(x_attn + x_mlp)) x = x + y return x class ParallelThingsBlock(nn.Module): """ Parallel ViT block (N parallel attention followed by N parallel MLP) Based on: `Three things everyone should know about Vision Transformers` - https://arxiv.org/abs/2203.09795 """ def __init__( self, dim, num_heads, num_parallel=2, mlp_ratio=4., qkv_bias=False, qk_norm=False, init_values=None, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, mlp_layer=Mlp, ): super().__init__() self.num_parallel = num_parallel self.attns = nn.ModuleList() self.ffns = nn.ModuleList() for _ in range(num_parallel): self.attns.append(nn.Sequential(OrderedDict([ ('norm', norm_layer(dim)), ('attn', Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_norm=qk_norm, attn_drop=attn_drop, proj_drop=proj_drop, norm_layer=norm_layer, )), ('ls', LayerScale(dim, init_values=init_values) if init_values else nn.Identity()), ('drop_path', DropPath(drop_path) if drop_path > 0. else nn.Identity()) ]))) self.ffns.append(nn.Sequential(OrderedDict([ ('norm', norm_layer(dim)), ('mlp', mlp_layer( dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, )), ('ls', LayerScale(dim, init_values=init_values) if init_values else nn.Identity()), ('drop_path', DropPath(drop_path) if drop_path > 0. 
else nn.Identity()) ]))) def _forward_jit(self, x): x = x + torch.stack([attn(x) for attn in self.attns]).sum(dim=0) x = x + torch.stack([ffn(x) for ffn in self.ffns]).sum(dim=0) return x @torch.jit.ignore def _forward(self, x): x = x + sum(attn(x) for attn in self.attns) x = x + sum(ffn(x) for ffn in self.ffns) return x def forward(self, x): if torch.jit.is_scripting() or torch.jit.is_tracing(): return self._forward_jit(x) else: return self._forward(x) class VisionTransformer(nn.Module): """ Vision Transformer A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - https://arxiv.org/abs/2010.11929 """ dynamic_img_size: Final[bool] def __init__( self, img_size: Union[int, Tuple[int, int]] = 224, patch_size: Union[int, Tuple[int, int]] = 16, in_chans: int = 3, num_classes: int = 1000, global_pool: str = 'token', embed_dim: int = 768, depth: int = 12, num_heads: int = 12, mlp_ratio: float = 4., qkv_bias: bool = True, qk_norm: bool = False, init_values: Optional[float] = None, class_token: bool = True, no_embed_class: bool = False, reg_tokens: int = 0, pre_norm: bool = False, fc_norm: Optional[bool] = None, dynamic_img_size: bool = False, dynamic_img_pad: bool = False, drop_rate: float = 0., pos_drop_rate: float = 0., patch_drop_rate: float = 0., proj_drop_rate: float = 0., attn_drop_rate: float = 0., drop_path_rate: float = 0., weight_init: str = '', embed_layer: Callable = PatchEmbed, norm_layer: Optional[LayerType] = None, act_layer: Optional[LayerType] = None, block_fn: Type[nn.Module] = Block, mlp_layer: Type[nn.Module] = Mlp, ): """ Args: img_size: Input image size. patch_size: Patch size. in_chans: Number of image input channels. num_classes: Mumber of classes for classification head. global_pool: Type of global pooling for final sequence (default: 'token'). embed_dim: Transformer embedding dimension. depth: Depth of transformer. num_heads: Number of attention heads. mlp_ratio: Ratio of mlp hidden dim to embedding dim. qkv_bias: Enable bias for qkv projections if True. init_values: Layer-scale init values (layer-scale enabled if not None). class_token: Use class token. no_embed_class: Don't include position embeddings for class (or reg) tokens. reg_tokens: Number of register tokens. fc_norm: Pre head norm after pool (instead of before), if None, enabled when global_pool == 'avg'. drop_rate: Head dropout rate. pos_drop_rate: Position embedding dropout rate. attn_drop_rate: Attention dropout rate. drop_path_rate: Stochastic depth rate. weight_init: Weight initialization scheme. embed_layer: Patch embedding layer. norm_layer: Normalization layer. act_layer: MLP activation layer. block_fn: Transformer block layer. 
""" super().__init__() assert global_pool in ('', 'avg', 'token', 'map') assert class_token or global_pool != 'token' use_fc_norm = global_pool == 'avg' if fc_norm is None else fc_norm norm_layer = get_norm_layer(norm_layer) or partial(nn.LayerNorm, eps=1e-6) act_layer = get_act_layer(act_layer) or nn.GELU self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.num_prefix_tokens = 1 if class_token else 0 self.num_prefix_tokens += reg_tokens self.num_reg_tokens = reg_tokens self.has_class_token = class_token self.no_embed_class = no_embed_class # don't embed prefix positions (includes reg) self.dynamic_img_size = dynamic_img_size self.grad_checkpointing = False embed_args = {} if dynamic_img_size: # flatten deferred until after pos embed embed_args.update(dict(strict_img_size=False, output_fmt='NHWC')) self.patch_embed = embed_layer( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, bias=not pre_norm, # disable bias if pre-norm is used (e.g. CLIP) dynamic_img_pad=dynamic_img_pad, **embed_args, ) num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if class_token else None self.reg_token = nn.Parameter(torch.zeros(1, reg_tokens, embed_dim)) if reg_tokens else None embed_len = num_patches if no_embed_class else num_patches + self.num_prefix_tokens self.pos_embed = nn.Parameter(torch.randn(1, embed_len, embed_dim) * .02) self.pos_drop = nn.Dropout(p=pos_drop_rate) if patch_drop_rate > 0: self.patch_drop = PatchDropout( patch_drop_rate, num_prefix_tokens=self.num_prefix_tokens, ) else: self.patch_drop = nn.Identity() self.norm_pre = norm_layer(embed_dim) if pre_norm else nn.Identity() dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.blocks = nn.Sequential(*[ block_fn( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_norm=qk_norm, init_values=init_values, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer, mlp_layer=mlp_layer, ) for i in range(depth)]) self.norm = norm_layer(embed_dim) if not use_fc_norm else nn.Identity() # Classifier Head if global_pool == 'map': self.attn_pool = AttentionPoolLatent( self.embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, norm_layer=norm_layer, ) else: self.attn_pool = None self.fc_norm = norm_layer(embed_dim) if use_fc_norm else nn.Identity() self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() if weight_init != 'skip': self.init_weights(weight_init) def init_weights(self, mode=''): assert mode in ('jax', 'jax_nlhb', 'moco', '') head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0. 
trunc_normal_(self.pos_embed, std=.02) if self.cls_token is not None: nn.init.normal_(self.cls_token, std=1e-6) named_apply(get_init_weights_vit(mode, head_bias), self) def _init_weights(self, m): # this fn left here for compat with downstream users init_weights_vit_timm(m) @torch.jit.ignore() def load_pretrained(self, checkpoint_path, prefix=''): _load_weights(self, checkpoint_path, prefix) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token', 'dist_token'} @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^cls_token|pos_embed|patch_embed', # stem and embed blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes: int, global_pool=None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'avg', 'token', 'map') if global_pool == 'map' and self.attn_pool is None: assert False, "Cannot currently add attention pooling in reset_classifier()." elif global_pool != 'map ' and self.attn_pool is not None: self.attn_pool = None # remove attention pooling self.global_pool = global_pool self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def _pos_embed(self, x): if self.dynamic_img_size: B, H, W, C = x.shape pos_embed = resample_abs_pos_embed( self.pos_embed, (H, W), num_prefix_tokens=0 if self.no_embed_class else self.num_prefix_tokens, ) x = x.view(B, -1, C) else: pos_embed = self.pos_embed to_cat = [] if self.cls_token is not None: to_cat.append(self.cls_token.expand(x.shape[0], -1, -1)) if self.reg_token is not None: to_cat.append(self.reg_token.expand(x.shape[0], -1, -1)) if self.no_embed_class: # deit-3, updated JAX (big vision) # position embedding does not overlap with class token, add then concat x = x + pos_embed if to_cat: x = torch.cat(to_cat + [x], dim=1) else: # original timm, JAX, and deit vit impl # pos_embed has entry for class token, concat then add if to_cat: x = torch.cat(to_cat + [x], dim=1) x = x + pos_embed return self.pos_drop(x) def _intermediate_layers( self, x: torch.Tensor, n: Union[int, Sequence] = 1, ): outputs, num_blocks = [], len(self.blocks) take_indices = set(range(num_blocks - n, num_blocks) if isinstance(n, int) else n) # forward pass x = self.patch_embed(x) x = self._pos_embed(x) x = self.patch_drop(x) x = self.norm_pre(x) for i, blk in enumerate(self.blocks): x = blk(x) if i in take_indices: outputs.append(x) return outputs def get_intermediate_layers( self, x: torch.Tensor, n: Union[int, Sequence] = 1, reshape: bool = False, return_prefix_tokens: bool = False, norm: bool = False, ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]]]: """ Intermediate layer accessor (NOTE: This is a WIP experiment). 
Inspired by DINO / DINOv2 interface """ # take last n blocks if n is an int, if in is a sequence, select by matching indices outputs = self._intermediate_layers(x, n) if norm: outputs = [self.norm(out) for out in outputs] prefix_tokens = [out[:, 0:self.num_prefix_tokens] for out in outputs] outputs = [out[:, self.num_prefix_tokens:] for out in outputs] if reshape: grid_size = self.patch_embed.grid_size outputs = [ out.reshape(x.shape[0], grid_size[0], grid_size[1], -1).permute(0, 3, 1, 2).contiguous() for out in outputs ] if return_prefix_tokens: return tuple(zip(outputs, prefix_tokens)) return tuple(outputs) def forward_features(self, x): x = self.patch_embed(x) x = self._pos_embed(x) x = self.patch_drop(x) x = self.norm_pre(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.attn_pool is not None: x = self.attn_pool(x) elif self.global_pool == 'avg': x = x[:, self.num_prefix_tokens:].mean(dim=1) elif self.global_pool: x = x[:, 0] # class token x = self.fc_norm(x) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def init_weights_vit_timm(module: nn.Module, name: str = ''): """ ViT weight initialization, original timm impl (for reproducibility) """ if isinstance(module, nn.Linear): trunc_normal_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) elif hasattr(module, 'init_weights'): module.init_weights() def init_weights_vit_jax(module: nn.Module, name: str = '', head_bias: float = 0.): """ ViT weight initialization, matching JAX (Flax) impl """ if isinstance(module, nn.Linear): if name.startswith('head'): nn.init.zeros_(module.weight) nn.init.constant_(module.bias, head_bias) else: nn.init.xavier_uniform_(module.weight) if module.bias is not None: nn.init.normal_(module.bias, std=1e-6) if 'mlp' in name else nn.init.zeros_(module.bias) elif isinstance(module, nn.Conv2d): lecun_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) elif hasattr(module, 'init_weights'): module.init_weights() def init_weights_vit_moco(module: nn.Module, name: str = ''): """ ViT weight initialization, matching moco-v3 impl minus fixed PatchEmbed """ if isinstance(module, nn.Linear): if 'qkv' in name: # treat the weights of Q, K, V separately val = math.sqrt(6. / float(module.weight.shape[0] // 3 + module.weight.shape[1])) nn.init.uniform_(module.weight, -val, val) else: nn.init.xavier_uniform_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) elif hasattr(module, 'init_weights'): module.init_weights() def get_init_weights_vit(mode='jax', head_bias: float = 0.): if 'jax' in mode: return partial(init_weights_vit_jax, head_bias=head_bias) elif 'moco' in mode: return init_weights_vit_moco else: return init_weights_vit_timm def resize_pos_embed( posemb, posemb_new, num_prefix_tokens=1, gs_new=(), interpolation='bicubic', antialias=False, ): """ Rescale the grid of position embeddings when loading from state_dict. 
*DEPRECATED* This function is being deprecated in favour of resample_abs_pos_embed Adapted from: https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224 """ ntok_new = posemb_new.shape[1] if num_prefix_tokens: posemb_prefix, posemb_grid = posemb[:, :num_prefix_tokens], posemb[0, num_prefix_tokens:] ntok_new -= num_prefix_tokens else: posemb_prefix, posemb_grid = posemb[:, :0], posemb[0] gs_old = int(math.sqrt(len(posemb_grid))) if not len(gs_new): # backwards compatibility gs_new = [int(math.sqrt(ntok_new))] * 2 assert len(gs_new) >= 2 _logger.info(f'Resized position embedding: {posemb.shape} ({[gs_old, gs_old]}) to {posemb_new.shape} ({gs_new}).') posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2) posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode=interpolation, antialias=antialias, align_corners=False) posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1) posemb = torch.cat([posemb_prefix, posemb_grid], dim=1) return posemb @torch.no_grad() def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''): """ Load weights from .npz checkpoints for official Google Brain Flax implementation """ def _n2p(w, t=True): if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1: w = w.flatten() if t: if w.ndim == 4: w = w.transpose([3, 2, 0, 1]) elif w.ndim == 3: w = w.transpose([2, 0, 1]) elif w.ndim == 2: w = w.transpose([1, 0]) return torch.from_numpy(w) w = np.load(checkpoint_path) interpolation = 'bilinear' antialias = False big_vision = False if not prefix: if 'opt/target/embedding/kernel' in w: prefix = 'opt/target/' elif 'params/embedding/kernel' in w: prefix = 'params/' big_vision = True elif 'params/img/embedding/kernel' in w: prefix = 'params/img/' big_vision = True if hasattr(model.patch_embed, 'backbone'): # hybrid backbone = model.patch_embed.backbone stem_only = not hasattr(backbone, 'stem') stem = backbone if stem_only else backbone.stem stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel']))) stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale'])) stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias'])) if not stem_only: for i, stage in enumerate(backbone.stages): for j, block in enumerate(stage.blocks): bp = f'{prefix}block{i + 1}/unit{j + 1}/' for r in range(3): getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel'])) getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale'])) getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias'])) if block.downsample is not None: block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel'])) block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale'])) block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias'])) embed_conv_w = _n2p(w[f'{prefix}embedding/kernel']) else: embed_conv_w = adapt_input_conv( model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel'])) if embed_conv_w.shape[-2:] != model.patch_embed.proj.weight.shape[-2:]: embed_conv_w = resample_patch_embed( embed_conv_w, model.patch_embed.proj.weight.shape[-2:], interpolation=interpolation, antialias=antialias, verbose=True, ) model.patch_embed.proj.weight.copy_(embed_conv_w) model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias'])) if model.cls_token is not None: model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False)) if big_vision: pos_embed_w = _n2p(w[f'{prefix}pos_embedding'], 
t=False) else: pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False) if pos_embed_w.shape != model.pos_embed.shape: old_shape = pos_embed_w.shape num_prefix_tokens = 0 if getattr(model, 'no_embed_class', False) else getattr(model, 'num_prefix_tokens', 1) pos_embed_w = resample_abs_pos_embed( # resize pos embedding when different size from pretrained weights pos_embed_w, new_size=model.patch_embed.grid_size, num_prefix_tokens=num_prefix_tokens, interpolation=interpolation, antialias=antialias, verbose=True, ) model.pos_embed.copy_(pos_embed_w) model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale'])) model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias'])) if (isinstance(model.head, nn.Linear) and f'{prefix}head/bias' in w and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]): model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel'])) model.head.bias.copy_(_n2p(w[f'{prefix}head/bias'])) # NOTE representation layer has been removed, not used in latest 21k/1k pretrained weights # if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w: # model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel'])) # model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias'])) if model.attn_pool is not None: block_prefix = f'{prefix}MAPHead_0/' mha_prefix = block_prefix + f'MultiHeadDotProductAttention_0/' model.attn_pool.latent.copy_(_n2p(w[f'{block_prefix}probe'], t=False)) model.attn_pool.kv.weight.copy_(torch.cat([ _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('key', 'value')])) model.attn_pool.kv.bias.copy_(torch.cat([ _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('key', 'value')])) model.attn_pool.q.weight.copy_(_n2p(w[f'{mha_prefix}query/kernel'], t=False).flatten(1).T) model.attn_pool.q.bias.copy_(_n2p(w[f'{mha_prefix}query/bias'], t=False).reshape(-1)) model.attn_pool.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1)) model.attn_pool.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias'])) model.attn_pool.norm.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'])) model.attn_pool.norm.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias'])) for r in range(2): getattr(model.attn_pool.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_0/Dense_{r}/kernel'])) getattr(model.attn_pool.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_0/Dense_{r}/bias'])) mha_sub, b_sub, ln1_sub = (0, 0, 1) if big_vision else (1, 3, 2) for i, block in enumerate(model.blocks.children()): block_prefix = f'{prefix}Transformer/encoderblock_{i}/' mha_prefix = block_prefix + f'MultiHeadDotProductAttention_{mha_sub}/' block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'])) block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias'])) block.attn.qkv.weight.copy_(torch.cat([ _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')])) block.attn.qkv.bias.copy_(torch.cat([ _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')])) block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1)) block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias'])) block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_{ln1_sub}/scale'])) block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_{ln1_sub}/bias'])) for r in range(2): getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_{b_sub}/Dense_{r}/kernel'])) 
getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_{b_sub}/Dense_{r}/bias'])) def _convert_openai_clip(state_dict, model, prefix='visual.'): out_dict = {} swaps = [ ('conv1', 'patch_embed.proj'), ('positional_embedding', 'pos_embed'), ('transformer.resblocks.', 'blocks.'), ('ln_pre', 'norm_pre'), ('ln_post', 'norm'), ('ln_', 'norm'), ('in_proj_', 'qkv.'), ('out_proj', 'proj'), ('mlp.c_fc', 'mlp.fc1'), ('mlp.c_proj', 'mlp.fc2'), ] for k, v in state_dict.items(): if not k.startswith(prefix): continue k = k.replace(prefix, '') for sp in swaps: k = k.replace(sp[0], sp[1]) if k == 'proj': k = 'head.weight' v = v.transpose(0, 1) out_dict['head.bias'] = torch.zeros(v.shape[0]) elif k == 'class_embedding': k = 'cls_token' v = v.unsqueeze(0).unsqueeze(1) elif k == 'pos_embed': v = v.unsqueeze(0) if v.shape[1] != model.pos_embed.shape[1]: # To resize pos embedding when using model at different size from pretrained weights v = resize_pos_embed( v, model.pos_embed, 0 if getattr(model, 'no_embed_class') else getattr(model, 'num_prefix_tokens', 1), model.patch_embed.grid_size ) out_dict[k] = v return out_dict def _convert_dinov2(state_dict, model): out_dict = {} state_dict.pop("mask_token", None) if 'register_tokens' in state_dict: # convert dinov2 w/ registers to no_embed_class timm model (neither cls or reg tokens overlap pos embed) out_dict['reg_token'] = state_dict.pop('register_tokens') out_dict['cls_token'] = state_dict.pop('cls_token') + state_dict['pos_embed'][:, 0] out_dict['pos_embed'] = state_dict.pop('pos_embed')[:, 1:] for k, v in state_dict.items(): if re.match(r"blocks\.(\d+)\.mlp\.w12\.(?:weight|bias)", k): out_dict[k.replace("w12", "fc1")] = v continue elif re.match(r"blocks\.(\d+)\.mlp\.w3\.(?:weight|bias)", k): out_dict[k.replace("w3", "fc2")] = v continue out_dict[k] = v return out_dict def checkpoint_filter_fn( state_dict, model, adapt_layer_scale=False, interpolation='bicubic', antialias=True, ): """ convert patch embedding weight from manual patchify + linear proj to conv""" out_dict = {} state_dict = state_dict.get('model', state_dict) state_dict = state_dict.get('state_dict', state_dict) prefix = '' if 'visual.class_embedding' in state_dict: return _convert_openai_clip(state_dict, model) elif 'module.visual.class_embedding' in state_dict: return _convert_openai_clip(state_dict, model, prefix='module.visual.') if "mask_token" in state_dict: state_dict = _convert_dinov2(state_dict, model) if "encoder" in state_dict: state_dict = state_dict['encoder'] prefix = 'module.' if 'visual.trunk.pos_embed' in state_dict: # convert an OpenCLIP model with timm vision encoder # FIXME remap final nn.Linear if it exists outside of the timm .trunk (ie in visual.head.proj) prefix = 'visual.trunk.' 
if prefix: # filter on & remove prefix string from keys state_dict = {k[len(prefix):]: v for k, v in state_dict.items() if k.startswith(prefix)} for k, v in state_dict.items(): if 'patch_embed.proj.weight' in k: O, I, H, W = model.patch_embed.proj.weight.shape if len(v.shape) < 4: # For old models that I trained prior to conv based patchification O, I, H, W = model.patch_embed.proj.weight.shape v = v.reshape(O, -1, H, W) if v.shape[-1] != W or v.shape[-2] != H: v = resample_patch_embed( v, (H, W), interpolation=interpolation, antialias=antialias, verbose=True, ) elif k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]: # To resize pos embedding when using model at different size from pretrained weights num_prefix_tokens = 0 if getattr(model, 'no_embed_class', False) else getattr(model, 'num_prefix_tokens', 1) v = resample_abs_pos_embed( v, new_size=model.patch_embed.grid_size, num_prefix_tokens=num_prefix_tokens, interpolation=interpolation, antialias=antialias, verbose=True, ) elif adapt_layer_scale and 'gamma_' in k: # remap layer-scale gamma into sub-module (deit3 models) k = re.sub(r'gamma_([0-9])', r'ls\1.gamma', k) elif 'pre_logits' in k: # NOTE representation layer removed as not used in latest 21k/1k pretrained weights continue out_dict[k] = v return out_dict def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs } default_cfgs = { # re-finetuned augreg 21k FT on in1k weights 'vit_base_patch16_224.augreg2_in21k_ft_in1k': _cfg( hf_hub_id='timm/'), 'vit_base_patch16_384.augreg2_in21k_ft_in1k': _cfg(), 'vit_base_patch8_224.augreg2_in21k_ft_in1k': _cfg( hf_hub_id='timm/'), # How to train your ViT (augreg) weights, pretrained on 21k FT on in1k 'vit_tiny_patch16_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_tiny_patch16_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_small_patch32_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_small_patch32_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_small_patch16_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_small_patch16_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 
'vit_base_patch32_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_base_patch32_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_light1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_base_patch16_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_base_patch16_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_base_patch8_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_large_patch16_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_large_patch16_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), # patch models (weights from official Google JAX impl) pretrained on in21k FT on in1k 'vit_base_patch16_224.orig_in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth', hf_hub_id='timm/'), 'vit_base_patch16_384.orig_in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_384-83fb41ba.pth', hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0), 'vit_large_patch32_384.orig_in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p32_384-9b920ba8.pth', hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0), # How to train your ViT (augreg) weights trained on in1k only 'vit_small_patch16_224.augreg_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/S_16-i1k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_small_patch16_384.augreg_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/S_16-i1k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_base_patch32_224.augreg_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_32-i1k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_base_patch32_384.augreg_in1k': _cfg( 
url='https://storage.googleapis.com/vit_models/augreg/B_32-i1k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_base_patch16_224.augreg_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_16-i1k-300ep-lr_0.001-aug_strong2-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_base_patch16_384.augreg_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_16-i1k-300ep-lr_0.001-aug_strong2-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_large_patch14_224.untrained': _cfg(url=''), 'vit_huge_patch14_224.untrained': _cfg(url=''), 'vit_giant_patch14_224.untrained': _cfg(url=''), 'vit_gigantic_patch14_224.untrained': _cfg(url=''), # patch models, imagenet21k (weights from official Google JAX impl) 'vit_large_patch32_224.orig_in21k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth', hf_hub_id='timm/', num_classes=21843), 'vit_huge_patch14_224.orig_in21k': _cfg( url='https://storage.googleapis.com/vit_models/imagenet21k/ViT-H_14.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), # How to train your ViT (augreg) weights, pretrained on in21k 'vit_tiny_patch16_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_small_patch32_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_small_patch16_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_base_patch32_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_base_patch16_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_base_patch8_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_large_patch16_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), # SAM trained models (https://arxiv.org/abs/2106.01548) 'vit_base_patch32_224.sam_in1k': _cfg( url='https://storage.googleapis.com/vit_models/sam/ViT-B_32.npz', custom_load=True, hf_hub_id='timm/'), 'vit_base_patch16_224.sam_in1k': _cfg( url='https://storage.googleapis.com/vit_models/sam/ViT-B_16.npz', custom_load=True, hf_hub_id='timm/'), # DINO pretrained - https://arxiv.org/abs/2104.14294 (no classifier head, for fine-tune only) 'vit_small_patch16_224.dino': _cfg( 
url='https://dl.fbaipublicfiles.com/dino/dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_small_patch8_224.dino': _cfg( url='https://dl.fbaipublicfiles.com/dino/dino_deitsmall8_pretrain/dino_deitsmall8_pretrain.pth', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_base_patch16_224.dino': _cfg( url='https://dl.fbaipublicfiles.com/dino/dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_base_patch8_224.dino': _cfg( url='https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), # DINOv2 pretrained - https://arxiv.org/abs/2304.07193 (no classifier head, for fine-tune/features only) 'vit_small_patch14_dinov2.lvd142m': _cfg( url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_pretrain.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), 'vit_base_patch14_dinov2.lvd142m': _cfg( url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_pretrain.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), 'vit_large_patch14_dinov2.lvd142m': _cfg( url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_pretrain.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), 'vit_giant_patch14_dinov2.lvd142m': _cfg( url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_pretrain.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), # DINOv2 pretrained w/ registers - https://arxiv.org/abs/2309.16588 (no classifier head, for fine-tune/features only) 'vit_small_patch14_reg4_dinov2.lvd142m': _cfg( url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_reg4_pretrain.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), 'vit_base_patch14_reg4_dinov2.lvd142m': _cfg( url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_reg4_pretrain.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), 'vit_large_patch14_reg4_dinov2.lvd142m': _cfg( url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_reg4_pretrain.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), 'vit_giant_patch14_reg4_dinov2.lvd142m': _cfg( url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_reg4_pretrain.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), # ViT ImageNet-21K-P pretraining by MILL 'vit_base_patch16_224_miil.in21k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/vit_base_patch16_224_in21k_miil-887286df.pth', 
hf_hub_id='timm/', mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear', num_classes=11221), 'vit_base_patch16_224_miil.in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/vit_base_patch16_224_1k_miil_84_4-2deb18e3.pth', hf_hub_id='timm/', mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear'), # Custom timm variants 'vit_base_patch16_rpn_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_base_patch16_rpn_224-sw-3b07e89d.pth', hf_hub_id='timm/'), 'vit_medium_patch16_gap_240.sw_in12k': _cfg( hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95, num_classes=11821), 'vit_medium_patch16_gap_256.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_medium_patch16_gap_384.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=0.95, crop_mode='squash'), 'vit_base_patch16_gap_224': _cfg(), # CLIP pretrained image tower and related fine-tuned weights 'vit_base_patch32_clip_224.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), 'vit_base_patch32_clip_384.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 384, 384)), 'vit_base_patch32_clip_448.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 448, 448)), 'vit_base_patch16_clip_224.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=0.95), 'vit_base_patch16_clip_384.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 384, 384), crop_mode='squash'), 'vit_large_patch14_clip_224.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0), 'vit_large_patch14_clip_336.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), 'vit_huge_patch14_clip_224.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), 'vit_huge_patch14_clip_336.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), 'vit_base_patch32_clip_224.openai_ft_in12k_in1k': _cfg( # hf_hub_id='timm/vit_base_patch32_clip_224.openai_ft_in12k_in1k', # FIXME weight exists, need to push mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), 'vit_base_patch32_clip_384.openai_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=0.95, input_size=(3, 384, 384), crop_mode='squash'), 'vit_base_patch16_clip_224.openai_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=0.95), 'vit_base_patch16_clip_384.openai_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=0.95, input_size=(3, 384, 384), crop_mode='squash'), 'vit_large_patch14_clip_224.openai_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), 'vit_large_patch14_clip_336.openai_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), 
'vit_base_patch32_clip_224.laion2b_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), 'vit_base_patch16_clip_224.laion2b_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), 'vit_base_patch16_clip_384.laion2b_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 384, 384), crop_mode='squash'), 'vit_large_patch14_clip_224.laion2b_ft_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0), 'vit_large_patch14_clip_336.laion2b_ft_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), 'vit_huge_patch14_clip_224.laion2b_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), 'vit_huge_patch14_clip_336.laion2b_ft_in1k': _cfg( hf_hub_id='', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), 'vit_base_patch32_clip_224.openai_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), 'vit_base_patch16_clip_224.openai_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), 'vit_base_patch16_clip_384.openai_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 384, 384), crop_mode='squash'), 'vit_large_patch14_clip_224.openai_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), 'vit_base_patch32_clip_224.laion2b_ft_in12k': _cfg( #hf_hub_id='timm/vit_base_patch32_clip_224.laion2b_ft_in12k', # FIXME weight exists, need to push mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), 'vit_base_patch16_clip_224.laion2b_ft_in12k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), 'vit_large_patch14_clip_224.laion2b_ft_in12k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0, num_classes=11821), 'vit_huge_patch14_clip_224.laion2b_ft_in12k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=11821), 'vit_base_patch32_clip_224.openai_ft_in12k': _cfg( # hf_hub_id='timm/vit_base_patch32_clip_224.openai_ft_in12k', # FIXME weight exists, need to push mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), 'vit_base_patch16_clip_224.openai_ft_in12k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), 'vit_large_patch14_clip_224.openai_ft_in12k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=11821), 'vit_base_patch32_clip_224.laion2b': _cfg( hf_hub_id='laion/CLIP-ViT-B-32-laion2B-s34B-b79K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), 'vit_base_patch16_clip_224.laion2b': _cfg( hf_hub_id='laion/CLIP-ViT-B-16-laion2B-s34B-b88K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), 'vit_large_patch14_clip_224.laion2b': _cfg( hf_hub_id='laion/CLIP-ViT-L-14-laion2B-s32B-b82K', hf_hub_filename='open_clip_pytorch_model.bin', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0, num_classes=768), 'vit_huge_patch14_clip_224.laion2b': _cfg( hf_hub_id='laion/CLIP-ViT-H-14-laion2B-s32B-b79K', hf_hub_filename='open_clip_pytorch_model.bin', 
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024), 'vit_giant_patch14_clip_224.laion2b': _cfg( hf_hub_id='laion/CLIP-ViT-g-14-laion2B-s12B-b42K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024), 'vit_gigantic_patch14_clip_224.laion2b': _cfg( hf_hub_id='laion/CLIP-ViT-bigG-14-laion2B-39B-b160k', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1280), 'vit_base_patch32_clip_224.datacompxl': _cfg( hf_hub_id='laion/CLIP-ViT-B-32-DataComp.XL-s13B-b90K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), 'vit_base_patch32_clip_256.datacompxl': _cfg( hf_hub_id='laion/CLIP-ViT-B-32-256x256-DataComp-s34B-b86K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 256, 256), num_classes=512), 'vit_base_patch16_clip_224.datacompxl': _cfg( hf_hub_id='laion/CLIP-ViT-B-16-DataComp.XL-s13B-b90K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), 'vit_large_patch14_clip_224.datacompxl': _cfg( hf_hub_id='laion/CLIP-ViT-L-14-DataComp.XL-s13B-b90K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), 'vit_base_patch16_clip_224.dfn2b': _cfg( hf_hub_id='apple/DFN2B-CLIP-ViT-B-16', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), 'vit_large_patch14_clip_224.dfn2b': _cfg( hf_hub_id='apple/DFN2B-CLIP-ViT-L-14', hf_hub_filename='open_clip_pytorch_model.bin', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), 'vit_huge_patch14_clip_224.dfn5b': _cfg( hf_hub_id='apple/DFN5B-CLIP-ViT-H-14', hf_hub_filename='open_clip_pytorch_model.bin', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024), 'vit_huge_patch14_clip_378.dfn5b': _cfg( hf_hub_id='apple/DFN5B-CLIP-ViT-H-14-378', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, notes=('natively QuickGELU, use quickgelu model variant for original results',), crop_pct=1.0, input_size=(3, 378, 378), num_classes=1024), 'vit_base_patch32_clip_224.metaclip_2pt5b': _cfg( hf_hub_id='facebook/metaclip-b32-fullcc2.5b', hf_hub_filename='metaclip_b32_fullcc2.5b.bin', license='cc-by-nc-4.0', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), 'vit_base_patch16_clip_224.metaclip_2pt5b': _cfg( hf_hub_id='facebook/metaclip-b16-fullcc2.5b', hf_hub_filename='metaclip_b16_fullcc2.5b.bin', license='cc-by-nc-4.0', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), 'vit_large_patch14_clip_224.metaclip_2pt5b': _cfg( hf_hub_id='facebook/metaclip-l14-fullcc2.5b', hf_hub_filename='metaclip_l14_fullcc2.5b.bin', license='cc-by-nc-4.0', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), 'vit_huge_patch14_clip_224.metaclip_2pt5b': 
_cfg( hf_hub_id='facebook/metaclip-h14-fullcc2.5b', hf_hub_filename='metaclip_h14_fullcc2.5b.bin', license='cc-by-nc-4.0', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024), 'vit_base_patch32_clip_224.openai': _cfg( hf_hub_id='timm/', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), 'vit_base_patch16_clip_224.openai': _cfg( hf_hub_id='timm/', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), 'vit_large_patch14_clip_224.openai': _cfg( hf_hub_id='timm/', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), 'vit_large_patch14_clip_336.openai': _cfg( hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 336, 336), num_classes=768), # experimental (may be removed) 'vit_base_patch32_plus_256.untrained': _cfg(url='', input_size=(3, 256, 256), crop_pct=0.95), 'vit_base_patch16_plus_240.untrained': _cfg(url='', input_size=(3, 240, 240), crop_pct=0.95), 'vit_small_patch16_36x1_224.untrained': _cfg(url=''), 'vit_small_patch16_18x2_224.untrained': _cfg(url=''), 'vit_base_patch16_18x2_224.untrained': _cfg(url=''), # EVA fine-tuned weights from MAE style MIM - EVA-CLIP target pretrain # https://github.com/baaivision/EVA/blob/7ecf2c0a370d97967e86d047d7af9188f78d2df3/eva/README.md#eva-l-learning-better-mim-representations-from-eva-clip 'eva_large_patch14_196.in22k_ft_in22k_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_l_psz14_196px_21k_to_1k_ft_88p6.pt', hf_hub_id='timm/', license='mit', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 196, 196), crop_pct=1.0), 'eva_large_patch14_336.in22k_ft_in22k_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_l_psz14_336px_21k_to_1k_ft_89p2.pt', hf_hub_id='timm/', license='mit', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'), 'eva_large_patch14_196.in22k_ft_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_l_psz14_196px_1k_ft_88p0.pt', hf_hub_id='timm/', license='mit', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 196, 196), crop_pct=1.0), 'eva_large_patch14_336.in22k_ft_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_l_psz14_336px_1k_ft_88p65.pt', hf_hub_id='timm/', license='mit', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'), 'flexivit_small.1200ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_s_i1k.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_small.600ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_s_i1k_600ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_small.300ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_s_i1k_300ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_base.1200ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i1k.npz', custom_load=True, 
hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_base.600ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i1k_600ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_base.300ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i1k_300ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_base.1000ep_in21k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i21k_1000ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95, num_classes=21843), 'flexivit_base.300ep_in21k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i21k_300ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95, num_classes=21843), 'flexivit_large.1200ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_l_i1k.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_large.600ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_l_i1k_600ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_large.300ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_l_i1k_300ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_base.patch16_in21k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/vit_b16_i21k_300ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95, num_classes=21843), 'flexivit_base.patch30_in21k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/vit_b30_i21k_300ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95, num_classes=21843), 'vit_base_patch16_xp_224.untrained': _cfg(url=''), 'vit_large_patch14_xp_224.untrained': _cfg(url=''), 'vit_huge_patch14_xp_224.untrained': _cfg(url=''), 'vit_base_patch16_224.mae': _cfg( url='https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_base.pth', hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_large_patch16_224.mae': _cfg( url='https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_large.pth', hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_huge_patch14_224.mae': _cfg( url='https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_huge.pth', hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_huge_patch14_gap_224.in1k_ijepa': _cfg( url='https://dl.fbaipublicfiles.com/ijepa/IN1K-vit.h.14-300e.pth.tar', # hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_huge_patch14_gap_224.in22k_ijepa': _cfg( url='https://dl.fbaipublicfiles.com/ijepa/IN22K-vit.h.14-900e.pth.tar', # hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_huge_patch16_gap_448.in1k_ijepa': _cfg( url='https://dl.fbaipublicfiles.com/ijepa/IN1K-vit.h.16-448px-300e.pth.tar', # hf_hub_id='timm/', license='cc-by-nc-4.0', input_size=(3, 448, 448), crop_pct=1.0, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_giant_patch16_gap_224.in22k_ijepa': _cfg( 
url='https://dl.fbaipublicfiles.com/ijepa/IN22K-vit.g.16-600e.pth.tar', # hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_base_patch16_siglip_224.webli': _cfg( hf_hub_id='timm/ViT-B-16-SigLIP', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=0), 'vit_base_patch16_siglip_256.webli': _cfg( hf_hub_id='timm/ViT-B-16-SigLIP-256', hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 256, 256), num_classes=0), 'vit_base_patch16_siglip_384.webli': _cfg( hf_hub_id='timm/ViT-B-16-SigLIP-384', hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 384, 384), num_classes=0), 'vit_base_patch16_siglip_512.webli': _cfg( hf_hub_id='timm/ViT-B-16-SigLIP-512', hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 512, 512), num_classes=0), 'vit_large_patch16_siglip_256.webli': _cfg( hf_hub_id='timm/ViT-L-16-SigLIP-256', hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 256, 256), num_classes=0), 'vit_large_patch16_siglip_384.webli': _cfg( hf_hub_id='timm/ViT-L-16-SigLIP-384', hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 384, 384), num_classes=0), 'vit_so400m_patch14_siglip_224.webli': _cfg( hf_hub_id='timm/ViT-SO400M-14-SigLIP', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=0), 'vit_so400m_patch14_siglip_384.webli': _cfg( hf_hub_id='timm/ViT-SO400M-14-SigLIP-384', hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 384, 384), num_classes=0), 'vit_medium_patch16_reg4_256': _cfg( input_size=(3, 256, 256)), 'vit_medium_patch16_reg4_gap_256': _cfg( input_size=(3, 256, 256)), 'vit_base_patch16_reg8_gap_256': _cfg(input_size=(3, 256, 256)), } _quick_gelu_cfgs = [ 'vit_large_patch14_clip_224.dfn2b', 'vit_huge_patch14_clip_224.dfn5b', 'vit_huge_patch14_clip_378.dfn5b', 'vit_base_patch32_clip_224.metaclip_2pt5b', 'vit_base_patch16_clip_224.metaclip_2pt5b', 'vit_large_patch14_clip_224.metaclip_2pt5b', 'vit_huge_patch14_clip_224.metaclip_2pt5b', 'vit_base_patch32_clip_224.openai', 'vit_base_patch16_clip_224.openai', 'vit_large_patch14_clip_224.openai', 'vit_large_patch14_clip_336.openai', ] default_cfgs.update({ n.replace('_clip_', '_clip_quickgelu_'): default_cfgs[n] for n in _quick_gelu_cfgs }) default_cfgs = generate_default_cfgs(default_cfgs) def _create_vision_transformer(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') if 'flexi' in variant: # FIXME Google FlexiViT pretrained models have a strong preference for bilinear patch / embed # interpolation, other pretrained models resize better w/ anti-aliased bicubic interpolation. _filter_fn = partial(checkpoint_filter_fn, interpolation='bilinear', antialias=False) else: _filter_fn = checkpoint_filter_fn # FIXME attn pool (currently only in siglip) params removed if pool disabled, is there a better soln? strict = True if 'siglip' in variant and kwargs.get('global_pool', None) != 'map': strict = False
return build_model_with_cfg(
0
2023-11-05 01:25:14+00:00
12k
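The record above ends with the timm Vision Transformer helper code that, among other things, resizes absolute position embeddings when a checkpoint is loaded at a resolution different from the pretraining one (resize_pos_embed / resample_abs_pos_embed). Purely as a reading aid, here is a minimal, self-contained sketch of that idea; it is not the timm implementation, and the function name resize_grid_pos_embed, the square-source-grid assumption, and the single class-token slot are illustrative choices, not facts from the record.

# Illustrative sketch only: interpolate the patch-grid part of an absolute
# position embedding to a new grid size, keeping any prefix (class) tokens.
import math
import torch
import torch.nn.functional as F

def resize_grid_pos_embed(pos_embed: torch.Tensor, new_hw, num_prefix_tokens: int = 1):
    # pos_embed: [1, num_prefix_tokens + H*W, C]; assumes the source grid is square.
    prefix, grid = pos_embed[:, :num_prefix_tokens], pos_embed[:, num_prefix_tokens:]
    gs_old = int(math.sqrt(grid.shape[1]))
    grid = grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)      # [1, C, H, W]
    grid = F.interpolate(grid, size=new_hw, mode='bicubic', align_corners=False)
    grid = grid.permute(0, 2, 3, 1).reshape(1, new_hw[0] * new_hw[1], -1)
    return torch.cat([prefix, grid], dim=1)

# e.g. adapting a 14x14-grid embedding (224px, patch 16) to a 24x24 grid (384px):
# new_pe = resize_grid_pos_embed(old_pe, (24, 24))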
ilur98/DGQ
dgq/quant/quant_sequence.py
[ { "identifier": "prepare_hook", "path": "dgq/quant/smooth_hooker.py", "snippet": "def prepare_hook(layer, inps, qconfig, inps_kwargs): \n handles = []\n for mod in layer.modules():\n if isinstance(mod, nn.LayerNorm) or isinstance(mod, LlamaRMSNorm):\n if qconfig[\"meanact\"]:\n handles.append(mod.register_forward_hook(sta_batch_minmax))\n if qconfig[\"smoothquant\"]:\n handles.append(mod.register_forward_hook(sta_batch0))\n if isinstance(layer, LlamaDecoderLayer):\n handles.append(layer.mlp.down_proj.register_forward_hook(sta_batch1))\n handles.append(layer.self_attn.o_proj.register_forward_hook(sta_batch1))\n if qconfig['kvquant']:\n handles.append(layer.self_attn.k_quant.register_forward_hook(sta_batch_qkv))\n handles.append(layer.self_attn.v_quant.register_forward_hook(sta_batch_qkv))\n handles.append(layer.self_attn.q_quant.register_forward_hook(sta_batch_qkv))\n elif isinstance(layer, OPTDecoderLayer):\n handles.append(layer.fc2.register_forward_hook(sta_batch1))\n handles.append(layer.self_attn.out_proj.register_forward_hook(sta_batch1))\n if qconfig['kvquant']:\n handles.append(layer.self_attn.k_quant.register_forward_hook(sta_batch_qkv))\n handles.append(layer.self_attn.v_quant.register_forward_hook(sta_batch_qkv))\n handles.append(layer.self_attn.q_quant.register_forward_hook(sta_batch_qkv))\n elif isinstance(layer, BloomBlock):\n if qconfig['kvquant']:\n handles.append(layer.self_attn.k_quant.register_forward_hook(sta_batch_qkv))\n handles.append(layer.self_attn.v_quant.register_forward_hook(sta_batch_qkv))\n handles.append(layer.self_attn.q_quant.register_forward_hook(sta_batch_qkv))\n else:\n raise NotImplemented\n\n for inp in inps:\n # print(inp.unsqueeze(0).shape)\n layer(inp.unsqueeze(0), **inps_kwargs)\n for h in handles:\n h.remove()\n return " }, { "identifier": "mean_bias", "path": "dgq/quant/smooth.py", "snippet": "@torch.no_grad()\ndef mean_bias(module):\n if isinstance(module, OPTDecoderLayer):\n attn_ln = module.self_attn_layer_norm\n qkv = [module.self_attn.q_proj,\n module.self_attn.k_proj, module.self_attn.v_proj]\n qkv_input_scales = (attn_ln.out_max + attn_ln.out_min) / 2\n mean_ln_fcs(attn_ln, qkv, qkv_input_scales)\n\n ffn_ln = module.final_layer_norm\n fc1 = module.fc1\n fc1_input_scales = (ffn_ln.out_max + ffn_ln.out_min) / 2\n mean_ln_fcs(ffn_ln, fc1, fc1_input_scales)\n elif isinstance(module, BloomBlock):\n attn_ln = module.input_layernorm\n qkv = module.self_attention.query_key_value\n qkv_input_scales = (attn_ln.out_max + attn_ln.out_min) / 2\n mean_ln_fcs(attn_ln, qkv, qkv_input_scales)\n\n ffn_ln = module.post_attention_layernorm\n fc1 = module.mlp.dense_h_to_4h\n fc1_input_scales = (ffn_ln.out_max + ffn_ln.out_min) / 2\n mean_ln_fcs(ffn_ln, fc1, fc1_input_scales)\n elif isinstance(module, LlamaDecoderLayer):\n attn_ln = module.input_layernorm\n qkv = [module.self_attn.q_proj,\n module.self_attn.k_proj, module.self_attn.v_proj]\n qkv_input_scales = (attn_ln.out_max + attn_ln.out_min) / 2\n mean_ln_fcs(attn_ln, qkv, qkv_input_scales)\n ffn_ln = module.post_attention_layernorm\n gate_proj = [module.mlp.gate_proj,module.mlp.up_proj]\n gate_proj_scales = (ffn_ln.out_max + ffn_ln.out_min) / 2\n mean_ln_fcs(ffn_ln, gate_proj, gate_proj_scales)\n for mod in module.modules():\n if hasattr(mod, 'out_max'):\n delattr(mod, 'out_max')\n if hasattr(mod, 'out_min'):\n delattr(mod, 'out_min') " }, { "identifier": "smooth_module", "path": "dgq/quant/smooth.py", "snippet": "@torch.no_grad()\ndef smooth_module(module, alpha=0.5, group_size=-1, weight_smooth=False, 
attention_mask=None, position_ids=None):\n if weight_smooth:\n if isinstance(module, OPTDecoderLayer):\n attn_ln = module.self_attn_layer_norm\n qkv = [module.self_attn.q_proj,\n module.self_attn.k_proj, module.self_attn.v_proj]\n # smooth_ln_fcs_weight(attn_ln, qkv) ##opt66b very bad...\n smooth_fc_weight(module.self_attn.v_proj, module.self_attn.out_proj, group_size)\n ffn_ln = module.final_layer_norm\n fc1 = module.fc1\n smooth_ln_fcs_weight(ffn_ln, fc1)\n smooth_fc_weight(module.fc1, module.fc2, group_size)\n elif isinstance(module, BloomBlock):\n attn_ln = module.input_layernorm\n qkv = module.self_attention.query_key_value\n smooth_ln_fcs_weight(attn_ln, qkv)\n v_proj = module.self_attention.query_key_value\n o_proj = module.self_attention.dense\n # smooth_fc_weight(v_proj, o_proj,qkv=True) ##bloom3b bad\n ffn_ln = module.post_attention_layernorm\n fc1 = module.mlp.dense_h_to_4h\n smooth_ln_fcs_weight(ffn_ln, fc1)\n # smooth_fc_weight(module.mlp.dense_4h_to_h, module.mlp.dense_h_to_4h, group_size)\n elif isinstance(module, LlamaDecoderLayer):\n attn_ln = module.input_layernorm\n qkv = [module.self_attn.q_proj,\n module.self_attn.k_proj, module.self_attn.v_proj]\n smooth_ln_fcs_weight(attn_ln, qkv)\n smooth_fc_weight(module.self_attn.v_proj, module.self_attn.o_proj, group_size)\n ffn_ln = module.post_attention_layernorm\n gate_proj = [module.mlp.gate_proj,module.mlp.up_proj]\n smooth_ln_fcs_weight(ffn_ln, gate_proj)\n smooth_fc_weight(module.mlp.up_proj, module.mlp.down_proj, group_size)\n else:\n if isinstance(module, OPTDecoderLayer):\n attn_ln = module.self_attn_layer_norm\n qkv = [module.self_attn.q_proj,\n module.self_attn.k_proj, module.self_attn.v_proj]\n qkv_input_scales = attn_ln.out_absmax\n smooth_ln_fcs(attn_ln, qkv, qkv_input_scales, alpha)\n v_proj = module.self_attn.v_proj\n o_proj = module.self_attn.out_proj\n # smooth_ov(v_proj, o_proj, o_proj.inp_absmax)\n ffn_ln = module.final_layer_norm\n fc1 = module.fc1\n fc1_input_scales = ffn_ln.out_absmax\n smooth_ln_fcs(ffn_ln, fc1, fc1_input_scales, alpha)\n # fc2 = module.fc2\n # fc2.inp_bias = ((fc2.inp_absmax )/2 ).clamp(min=0.).to(torch.float16)\n elif isinstance(module, BloomBlock):\n attn_ln = module.input_layernorm\n qkv = module.self_attention.query_key_value\n qkv_input_scales = attn_ln.out_absmax\n smooth_ln_fcs(attn_ln, qkv, qkv_input_scales, alpha)\n v_proj = module.self_attention.query_key_value\n o_proj = module.self_attention.dense\n # smooth_ov(v_proj, o_proj, o_proj.inp_absmax,qkv=True)\n ffn_ln = module.post_attention_layernorm\n fc1 = module.mlp.dense_h_to_4h\n fc1_input_scales = ffn_ln.out_absmax\n smooth_ln_fcs(ffn_ln, fc1, fc1_input_scales, alpha)\n fc2 = module.mlp.dense_4h_to_h\n fc2.inp_bias = ((fc2.inp_absmax + 0.2)/2 - 0.2 ).clamp(min=0.).to(torch.float16)\n elif isinstance(module, LlamaDecoderLayer):\n attn_ln = module.input_layernorm\n qkv = [module.self_attn.q_proj,\n module.self_attn.k_proj, module.self_attn.v_proj]\n qkv_input_scales = attn_ln.out_absmax\n smooth_ln_fcs(attn_ln, qkv, qkv_input_scales, alpha)\n v_proj = module.self_attn.v_proj\n o_proj = module.self_attn.o_proj\n # smooth_ov(v_proj, o_proj, o_proj.inp_absmax)\n ffn_ln = module.post_attention_layernorm\n gate_proj = [module.mlp.gate_proj,module.mlp.up_proj]\n gate_proj_scales = ffn_ln.out_absmax\n smooth_ln_fcs(ffn_ln, gate_proj, gate_proj_scales, alpha)\n smooth_llama_mlp(module.mlp.gate_proj,module.mlp.up_proj,module.mlp.down_proj,module.mlp.down_proj.inp_absmax)\n for mod in module.modules():\n if hasattr(mod, 
'inp_absmax'):\n delattr(mod, 'inp_absmax')\n if hasattr(mod, 'out_absmax'):\n delattr(mod, 'out_absmax')\n if hasattr(mod, 'inp_absmean'):\n delattr(mod, 'inp_absmean')\n if hasattr(mod, 'out_absmean'):\n delattr(mod, 'out_absmean')" }, { "identifier": "QuantLinear", "path": "dgq/quant/quant_linear.py", "snippet": "class QuantLinear(nn.Module):\n def __init__(self, in_features, out_features, bias, qconfig):\n super().__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.actq = qconfig[\"act_quant\"] is not None\n self.wtq = qconfig[\"wt_quant\"] is not None\n self.qconfig = qconfig\n if self.actq:\n self.abits = self.qconfig[\"act_quant\"][\"bits\"]\n self.register_buffer(\"amax\", torch.zeros(1, dtype=torch.bfloat16))\n if self.wtq:\n self.groupsize = self.qconfig[\"wt_quant\"][\"groupsize\"] if self.qconfig[\"wt_quant\"][\"groupsize\"] != -1 else self.in_features\n self.wbits = self.qconfig[\"wt_quant\"][\"bits\"]\n self.register_buffer('qweight', torch.zeros((in_features // 32 * self.wbits, out_features), dtype=torch.int32))\n self.register_buffer('wscales', torch.zeros((math.ceil(in_features / self.groupsize), out_features), dtype=torch.bfloat16))\n self.register_buffer('wzeros', torch.zeros((math.ceil(in_features / self.groupsize), out_features // 32 * self.wbits), dtype=torch.int32))\n if qconfig[\"wt_quant\"][\"w4w8\"]:\n self.register_buffer('wscales8', torch.zeros((out_features, ), dtype=torch.float16))\n if bias:\n self.register_buffer('bias', torch.zeros((out_features), dtype=torch.float16))\n else:\n self.bias = None\n\n def unpack(self, tensor):\n if self.wbits < 8:\n fintweight = python_decompress(tensor).view(-1, self.groupsize)\n else:\n fintweight = tensor.view(-1, self.groupsize)\n if hasattr(self, \"wscales8\"):\n qscales = (self.wscales.view(self.out_features, -1) * self.wscales8).view(-1, 1).to(tensor.device)\n else:\n qscales = self.wscales.to(tensor.device)\n fweight = (fintweight - self.wzeros.to(tensor.device)) * qscales\n\n return fweight.view(self.out_features, self.in_features).bfloat16()\n\n def pack(self, scales, zeros):\n scales = scales.contiguous().bfloat16().reshape(-1, 1)\n self.wscales = scales\n zeros = zeros.contiguous().bfloat16().reshape(-1, 1)\n self.wzeros = zeros\n scale_zeros = zeros.reshape(-1,1) * scales.reshape(-1,1)\n intweight = torch.round((self.weight.data.view(-1, self.groupsize)) / self.wscales + self.wzeros).to(torch.int)\n delattr(self, \"weight\")\n if self.wbits < 8:\n self.qweight = python_compress(intweight)\n else:\n self.qweight = intweight\n def prepare_actfun(self):\n if self.qconfig[\"act_quant\"] is None:\n return\n if self.qconfig[\"act_quant\"][\"method\"] == \"static\":\n self.act_quant = partial(quantize_activation_static,absmax=self.amax)\n # self.act_quant = quantize_activation_static\n elif self.qconfig[\"act_quant\"][\"method\"] == \"per_tensor\":\n self.act_quant = quantize_activation_per_tensor_absmax\n elif self.qconfig[\"act_quant\"][\"method\"] == \"per_token\":\n self.act_quant = quantize_activation_per_token_absmax\n else:\n raise NotImplemented\n def packW4W8(self, scales, zeros, scales8):\n scales = scales.contiguous().char().reshape(-1, 1)\n self.wscales = scales\n zeros = zeros.contiguous().char().reshape(-1, 1)\n self.wzeros = zeros\n scales8 = scales8.contiguous().bfloat16().reshape(-1, 1)\n self.wscales8 = scales8.reshape(-1, 1)\n qscales = (self.wscales.view(self.out_features, -1) * self.wscales8).view(-1, 1)\n intweight = torch.round((self.weight.data.view(-1, 
self.groupsize).float()) / qscales.reshape(-1, 1) + self.wzeros).to(torch.int)\n self.qweight = python_compress(intweight)\n delattr(self, \"weight\")\n\n def setquant(self, actq, wtq):\n self.actq = actq\n self.wtq = wtq\n\n def forward(self, x):\n out_shape = x.shape[:-1] + (self.out_features, )\n if self.actq:\n x = self.act_quant(x)\n if self.wtq:\n weight = self.unpack(self.qweight)\n else:\n weight = self.weight\n out = x.reshape(-1, x.shape[-1]) @ weight.t()\n out = out + self.bias if self.bias is not None else out \n return out.reshape(out_shape).to(x.dtype)" }, { "identifier": "QuantizerHelper", "path": "dgq/quant/quantizer_helper.py", "snippet": "class QuantizerHelper:\n\n def __init__(self, layer, observe=False):\n self.layer = layer\n self.dev = self.layer.weight.device\n W = layer.weight.data.clone()\n if isinstance(self.layer, nn.Conv2d):\n W = W.flatten(1)\n if isinstance(self.layer, transformers.Conv1D):\n W = W.t()\n self.rows = W.shape[0]\n self.columns = W.shape[1]\n self.H = torch.zeros((self.columns, self.columns), device=self.dev)\n self.nsamples = 0\n self.quantizer = Quantizer()\n self.observe = observe\n self.inp_absmax = None\n\n def add_batch(self, inp, out):\n # Hessian H = 2 X XT + λ I\n hidden_dim = inp.shape[-1]\n comming_max = torch.max(inp.view(-1, hidden_dim).abs().detach(), dim=0)[0].float().cpu()\n # print(comming_max)\n if self.inp_absmax is None:\n self.inp_absmax = comming_max\n self.inp_absmax2 = comming_max\n self.cnt = 1\n else:\n self.inp_absmax = self.inp_absmax.min( comming_max)\n self.inp_absmax2 = (self.inp_absmax+comming_max*self.cnt)/(self.cnt+1)\n self.cnt += 1\n self.layer.inp_absmax = self.inp_absmax #+ (self.inp_absmax2-self.inp_absmax)*0.2\n\n if len(inp.shape) == 2:\n inp = inp.unsqueeze(0)\n tmp = inp.shape[0]\n self.inp1 = inp.squeeze()\n self.out1 = None\n if isinstance(self.layer, nn.Linear) or isinstance(self.layer, transformers.Conv1D) or isinstance(self.layer, QuantLinear):\n if len(inp.shape) == 3:\n inp = inp.reshape((-1, inp.shape[-1]))\n inp = inp.t()\n if isinstance(self.layer, nn.Conv2d):\n unfold = nn.Unfold(self.layer.kernel_size, dilation=self.layer.dilation, padding=self.layer.padding, stride=self.layer.stride)\n inp = unfold(inp)\n inp = inp.permute([1, 0, 2])\n inp = inp.flatten(1)\n self.H *= self.nsamples / (self.nsamples + tmp)\n self.nsamples += tmp\n inp = math.sqrt(2 / self.nsamples) * inp.float()\n self.H += inp.matmul(inp.t())\n\n def print_loss(self, name, q_weight, weight_error, timecost):\n table = Texttable()\n name += ' ' * (16 - len(name))\n\n table.header(['name', 'weight_error', 'fp_inp_SNR', 'q_inp_SNR', 'time'])\n\n # assign weight\n self.layer.weight.data = q_weight.reshape(self.layer.weight.shape).to(self.layer.weight.data.dtype)\n\n if self.out1 is not None:\n # quantize input to int8\n quantizer = Quantizer()\n quantizer.configure(8, perchannel=False, sym=True, mse=False)\n quantizer.find_params(self.inp1)\n q_in = quantizer.quantize(self.inp1).type(torch.float16)\n q_out = self.layer(q_in)\n\n # get kinds of SNR\n q_SNR = torch_snr_error(q_out, self.out1).item()\n fp_SNR = torch_snr_error(self.layer(self.inp1), self.out1).item()\n else:\n q_SNR = '-'\n fp_SNR = '-'\n\n table.add_row([name, weight_error, fp_SNR, q_SNR, timecost])\n print(table.draw().split('\\n')[-2])\n\n\n def naivequant(self, groupsize=-1):\n self.method = 'naive'\n self.layer.to(self.dev)\n\n W = self.layer.weight.data.clone()\n org_shape = W.shape\n W = W.float()\n if groupsize >0:\n org_shape = W.shape\n tmp_W = W.view(-1, 
groupsize)\n self.quantizer.find_params(tmp_W, True)\n self.layer.weight.data = self.quantizer.quantize(tmp_W).to(self.layer.weight.data.dtype).view(org_shape)\n else:\n self.quantizer.find_params(W, weight=True)\n self.layer.weight.data = self.quantizer.quantize(W).to(self.layer.weight.data.dtype)\n\n scale = self.quantizer.scale.view(org_shape[0], -1)\n zero = self.quantizer.zero.view(org_shape[0], -1)\n return scale, zero\n\n def searchquant(self, groupsize=-1, W4W8=False):\n self.method = 'search'\n W = self.layer.weight.data.clone()\n org_shape = W.shape\n\n device, dtype = W.device, W.dtype\n if groupsize > 0:\n g_idx = [i // groupsize for i in range(org_shape[-1])]\n g_idx = torch.tensor(g_idx, dtype=torch.int32, device=device)\n else:\n g_idx = torch.tensor([])\n \n groupsize = groupsize if groupsize > 0 else org_shape[-1]\n\n grid = 20\n best_scale = torch.ones([W.shape[1] // groupsize, W.shape[0]],dtype=torch.bfloat16, device=device)\n best_zero = torch.ones([W.shape[1] // groupsize, W.shape[0]],dtype=torch.bfloat16, device=device)\n assert org_shape[1] % groupsize == 0\n assert self.quantizer.sym == False\n for nn in range(org_shape[1] // groupsize):\n W_t = W[:,nn*groupsize:(nn+1)*groupsize]\n inp_t = self.inp1[:,nn*groupsize:(nn+1)*groupsize]\n org_out = inp_t@(W_t.t())\n W_max = W_t.amax(dim=-1, keepdim=True)\n W_min = W_t.amin(dim=-1, keepdim=True)\n best = torch.full([W.shape[0]], float('inf'), device=device, dtype=dtype)\n for i in range(grid):\n ratio = 1.02 - (i+1) / grid*0.22\n W_t = W_t.clamp(W_min*ratio, W_max*ratio)\n qscale = (W_max*ratio - W_min*ratio) / self.quantizer.maxq\n qzero = torch.round(- W_min*ratio / qscale)\n qtensor = torch.clamp(torch.round(W_t/qscale)+qzero,0,self.quantizer.maxq)\n W_qt = qscale*(qtensor-qzero)\n out = inp_t@(W_qt.t())\n mse = (org_out - out).abs().pow(2).mean(dim=0).view(-1)\n best_idx = (best > mse).view(-1)\n best[best_idx] = mse[best_idx]\n best_scale[nn][best_idx] = qscale[best_idx].view(-1)\n best_zero[nn][best_idx] = qzero[best_idx].view(-1) \n\n best_scale = best_scale.t()\n best_zero = best_zero.t()\n self.quantizer.scale = best_scale.reshape(-1, 1)\n self.quantizer.zero = best_zero.reshape(-1, 1)\n self.layer.weight.data = self.quantizer.quantize(W.view(-1, groupsize)).to(self.layer.weight.data.dtype).view(org_shape)\n best_scale8 = torch.zeros((W.shape[0],), dtype=torch.bfloat16, device=device)\n if W4W8:\n grid = 80\n # best_scale = torch.ones([W.shape[0], 1], dtype=torch.float16, device=device)\n org_out = [email protected]()\n best = torch.full([W.shape[0]], float('inf'), device=device, dtype=dtype)\n for i in range(grid):\n ratio = 1.02 - (i+1) / grid*0.82\n # W_max = torch.abs(W_t).max() * ratio\n # \n W_max = W.abs().amax(dim=-1, keepdim=True) * ratio\n qscale_8 = W_max / (2 ** (8-1) - 1)\n qscale = torch.round(best_scale / qscale_8).clamp(min=1.)\n # qtensor = torch.clamp(torch.round(W_t/qscale)+qzero,0,self.quantizer.maxq)\n int_max = torch.floor(127 / qscale)\n # upper = torch.minimum(15, best_zero+int_max)\n # lower = torch.maximum(0, best_zero-int_max)\n inp_t = self.inp1\n upper = torch.clamp(best_zero+int_max, max=15.).reshape(-1, 1)\n lower = torch.clamp(best_zero-int_max, min=0.).reshape(-1, 1)\n qscale_q = (qscale * qscale_8).reshape(-1, 1)\n W_t = W.clamp(-W_max, W_max).view(-1, groupsize)\n q_tensor = torch.clamp(torch.round(W_t/qscale_q) + best_zero.reshape(-1, 1), lower, upper) \n W_qt = qscale_q*(q_tensor-best_zero.reshape(-1, 1))\n W_qt = W_qt.view(org_shape)\n out = inp_t@(W_qt.t())\n mse = (org_out 
- out).abs().pow(2).mean(dim=0).view(-1)\n best_idx = (best > mse).view(-1)\n best[best_idx] = mse[best_idx]\n best_scale8[best_idx] = qscale_8[best_idx].view(-1) \n W = W.clamp(best_scale8.view(-1, 1) * -127, best_scale8.view(-1, 1) * 127)\n best_scale = torch.round(best_scale / best_scale8.view(-1, 1)).clamp(min=1.)\n int_max = torch.floor(127 / best_scale)\n best_scale_q = (best_scale * best_scale8.view(-1, 1)).reshape(-1, 1)\n upper = torch.clamp(best_zero+int_max, max=15.).reshape(-1, 1)\n lower = torch.clamp(best_zero-int_max, min=0.).reshape(-1, 1)\n q_tensor = torch.clamp(torch.round(W.view(-1, groupsize)/ best_scale_q) + best_zero.reshape(-1, 1), lower, upper)\n self.layer.weight.data = best_scale_q*(q_tensor-best_zero.reshape(-1, 1))\n self.inp1 = None\n return best_scale, best_zero, best_scale8\n\n def gptqquant(self, blocksize=128, percdamp=.01, groupsize=-1, actorder=False, name=''):\n self.layer.to(self.dev)\n\n W = self.layer.weight.data.clone()\n if isinstance(self.layer, nn.Conv2d):\n W = W.flatten(1)\n if isinstance(self.layer, transformers.Conv1D):\n W = W.t()\n W = W.float()\n\n tick = time.time()\n\n if not self.quantizer.ready():\n self.quantizer.find_params(W, weight=True)\n\n H = self.H\n if not self.observe:\n del self.H\n dead = torch.diag(H) == 0\n H[dead, dead] = 1\n W[:, dead] = 0\n\n if actorder:\n perm = torch.argsort(torch.diag(H), descending=True)\n W = W[:, perm]\n H = H[perm][:, perm]\n\n Losses = torch.zeros_like(W)\n Q = torch.zeros_like(W)\n\n damp = percdamp * torch.mean(torch.diag(H))\n diag = torch.arange(self.columns, device=self.dev)\n H[diag, diag] += damp\n H = torch.linalg.cholesky(H)\n H = torch.cholesky_inverse(H)\n H = torch.linalg.cholesky(H, upper=True)\n Hinv = H\n\n g_idx = []\n scale = []\n zero = []\n now_idx = 1\n\n for i1 in range(0, self.columns, blocksize):\n i2 = min(i1 + blocksize, self.columns)\n count = i2 - i1\n\n W1 = W[:, i1:i2].clone()\n Q1 = torch.zeros_like(W1)\n Err1 = torch.zeros_like(W1)\n Losses1 = torch.zeros_like(W1)\n Hinv1 = Hinv[i1:i2, i1:i2]\n\n for i in range(count):\n w = W1[:, i]\n d = Hinv1[i, i]\n\n if groupsize != -1:\n if (i1 + i) % groupsize == 0:\n self.quantizer.find_params(W[:, (i1 + i):(i1 + i + groupsize)], weight=True)\n\n if ((i1 + i) // groupsize) - now_idx == -1:\n scale.append(self.quantizer.scale)\n zero.append(self.quantizer.zero)\n now_idx += 1\n\n q = self.quantizer.quantize(w.unsqueeze(1)).flatten()\n Q1[:, i] = q\n Losses1[:, i] = (w - q)**2 / d**2\n\n err1 = (w - q) / d\n W1[:, i:] -= err1.unsqueeze(1).matmul(Hinv1[i, i:].unsqueeze(0))\n Err1[:, i] = err1\n\n Q[:, i1:i2] = Q1\n Losses[:, i1:i2] = Losses1 / 2\n\n W[:, i2:] -= Err1.matmul(Hinv[i1:i2, i2:])\n\n torch.cuda.synchronize()\n error = torch.sum(Losses).item()\n\n groupsize = groupsize if groupsize != -1 else self.columns\n g_idx = [i // groupsize for i in range(self.columns)]\n g_idx = torch.tensor(g_idx, dtype=torch.int32, device=Q.device)\n if actorder:\n invperm = torch.argsort(perm)\n Q = Q[:, invperm]\n g_idx = g_idx[invperm]\n\n if isinstance(self.layer, transformers.Conv1D):\n Q = Q.t()\n\n self.print_loss(name=name, q_weight=Q, weight_error=error, timecost=(time.time() - tick))\n\n if scale == []:\n scale.append(self.quantizer.scale)\n zero.append(self.quantizer.zero)\n scale = torch.cat(scale, dim=1)\n zero = torch.cat(zero, dim=1)\n return scale, zero, g_idx, error\n\n def free(self):\n self.inp1 = None\n self.out1 = None\n self.H = None\n self.Losses = None\n self.Trace = None\n torch.cuda.empty_cache()" }, { 
"identifier": "kvquant", "path": "dgq/quant/kvquanter.py", "snippet": "def kvquant(layer):\n for mod in layer.modules():\n if isinstance(mod, ATTENTION_CLASS):\n mod.q_quant.scale = 2 * mod.q_quant.qkv_absmax.max() / mod.q_quant.maxq\n mod.q_quant.zero = torch.full_like(mod.q_quant.scale, (mod.q_quant.maxq + 1) / 2)\n mod.k_quant.scale = 2 * mod.k_quant.qkv_absmax.max() / mod.k_quant.maxq\n mod.k_quant.zero = torch.full_like(mod.k_quant.scale, (mod.k_quant.maxq + 1) / 2)\n mod.v_quant.scale = 2 * mod.v_quant.qkv_absmax.max() / mod.v_quant.maxq\n mod.v_quant.zero = torch.full_like(mod.v_quant.scale, (mod.v_quant.maxq + 1) / 2)\n delattr(mod.q_quant, \"qkv_absmax\")\n delattr(mod.k_quant, \"qkv_absmax\")\n delattr(mod.v_quant, \"qkv_absmax\")" }, { "identifier": "find_layers", "path": "dgq/utils/modelutils.py", "snippet": "def find_layers(module, layers=[nn.Conv2d, nn.Linear], name=''):\n if type(module) in layers:\n return {name: module}\n res = {}\n for name1, child in module.named_children():\n res.update(find_layers(child, layers=layers, name=name + '.' + name1 if name != '' else name1))\n return res" }, { "identifier": "move_embed", "path": "dgq/utils/modelutils.py", "snippet": "def move_embed(model, device):\n if isinstance(model, LlamaForCausalLM):\n model.model.embed_tokens = model.model.embed_tokens.to(device)\n elif isinstance(model, OPTForCausalLM):\n model.model.decoder.embed_tokens = model.model.decoder.embed_tokens.to(device)\n model.model.decoder.embed_positions = model.model.decoder.embed_positions.to(device)\n elif isinstance(model, BloomForCausalLM):\n model.transformer.word_embeddings = model.transformer.word_embeddings.to(device)\n model.transformer.word_embeddings_layernorm = model.transformer.word_embeddings_layernorm.to(device)\n elif \"mpt\" in str(model.__class__).lower():\n model.transformer.wte = model.transformer.wte.to(device)\n model.transformer.emb_drop = model.transformer.emb_drop.to(device)\n elif \"falcon\" in str(model.__class__).lower():\n model.transformer.word_embeddings = model.transformer.word_embeddings.to(device)\n else:\n raise NotImplementedError(type(model))" }, { "identifier": "get_blocks", "path": "dgq/utils/modelutils.py", "snippet": "def get_blocks(model):\n if isinstance(model, LlamaForCausalLM):\n layers = model.model.layers\n elif isinstance(model, OPTForCausalLM):\n layers = model.model.decoder.layers\n elif isinstance(model, BloomForCausalLM):\n layers = model.transformer.h\n elif \"mpt\" in str(model.__class__).lower():\n layers = model.transformer.blocks\n elif \"falcon\" in str(model.__class__).lower():\n layers = model.transformer.h\n else:\n raise NotImplementedError(type(model))\n return layers" } ]
import torch
import torch.nn as nn
from dgq.quant.smooth_hooker import prepare_hook
from dgq.quant.smooth import mean_bias, smooth_module
from dgq.quant.quant_linear import QuantLinear
from dgq.quant.quantizer_helper import QuantizerHelper
from dgq.quant.kvquanter import kvquant
from dgq.utils.modelutils import find_layers, move_embed, get_blocks
7,985
__all__ = ["quant_sequential"] def set_quant_state(module, actq, wtq): for mod in module.modules(): if isinstance(mod, QuantLinear): mod.setquant(actq, wtq) @torch.no_grad() def PTQ(model, enc, qconfig, nsamples=128, seqlen=2048): dev = "cuda:0"
__all__ = ["quant_sequential"] def set_quant_state(module, actq, wtq): for mod in module.modules(): if isinstance(mod, QuantLinear): mod.setquant(actq, wtq) @torch.no_grad() def PTQ(model, enc, qconfig, nsamples=128, seqlen=2048): dev = "cuda:0"
layers = get_blocks(model)
8
2023-11-01 13:45:16+00:00
12k
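The record above centres on DGQ's group-wise weight quantization (QuantizerHelper / QuantLinear). For orientation, here is a minimal, self-contained sketch of asymmetric group-wise fake quantization in plain PyTorch. It is not the repository's API: the function name, shapes, and the 1e-8 clamp are illustrative assumptions, and it omits DGQ's clipping-ratio grid search, W4W8 rescaling, and GPTQ-style error compensation.

import torch

def fake_quant_groupwise(w: torch.Tensor, group_size: int = 128, bits: int = 4):
    """Quantize then dequantize a weight tensor group-wise (asymmetric scale/zero,
    mirroring the scale and zero-point formulas in the record's quantizer).

    Assumes w.numel() is divisible by group_size.
    """
    maxq = 2 ** bits - 1
    shape = w.shape
    g = w.reshape(-1, group_size)                    # one row per quantization group
    w_max = g.amax(dim=-1, keepdim=True)
    w_min = g.amin(dim=-1, keepdim=True)
    scale = (w_max - w_min).clamp(min=1e-8) / maxq   # per-group step size
    zero = torch.round(-w_min / scale)               # per-group zero point
    q = torch.clamp(torch.round(g / scale) + zero, 0, maxq)
    deq = (q - zero) * scale                         # back to floating point
    return deq.reshape(shape), scale, zero

# usage: measure the reconstruction error on a random layer-sized matrix
w = torch.randn(768, 3072)
w_q, scale, zero = fake_quant_groupwise(w, group_size=128, bits=4)
print((w - w_q).abs().mean())

The QuantLinear shown in the record stores the integer tensor together with the scales and zeros (packed into int32 buffers) rather than the dequantized weights; the fake-quantized tensor here is only useful for checking reconstruction error.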
noco-ai/elemental-golem
server.py
[ { "identifier": "install_skill", "path": "application/download.py", "snippet": "def install_skill(all_skills, install_skill_data, shared_models, server_id, channel):\n # Create a list to hold all the processes\n processes = []\n for skill in all_skills:\n if skill[\"routing_key\"] != install_skill_data[\"routing_key\"]:\n continue\n\n if \"model\" in skill:\n for model in skill[\"model\"]:\n process = multiprocessing.Process(target=download_model, args=(model, install_skill_data, shared_models, server_id, channel))\n processes.append(process)\n process.start()\n \n if \"repository\" in skill:\n for repo in skill[\"repository\"]:\n # Create and start a new process for each download\n process = multiprocessing.Process(target=download_repo, args=(repo[\"url\"], repo[\"folder\"], repo[\"module_path\"]))\n processes.append(process)\n process.start()" }, { "identifier": "get_system_info", "path": "application/system_info.py", "snippet": "def get_system_info(server_id, gpu_type):\n # network info\n hostname = socket.gethostname()\n system_info = {\n \"server_id\": server_id,\n \"hostname\": hostname\n }\n\n # RAM information\n mem_info = psutil.virtual_memory()\n system_info[\"ram\"] = {\n \"total\": mem_info.total,\n \"available\": mem_info.available,\n \"used\": mem_info.used,\n \"percent_used\": mem_info.percent\n }\n\n # CPU information\n system_info[\"cpu\"] = {\n \"count\": psutil.cpu_count(),\n \"percent_used\": psutil.cpu_percent()\n }\n\n # Hard drive information\n disk_usage = psutil.disk_usage(os.path.abspath(os.sep))\n system_info[\"hd\"] = {\n \"total\": disk_usage.total,\n \"used\": disk_usage.used,\n \"free\": disk_usage.free,\n \"percent_used\": disk_usage.percent\n }\n \n system_info[\"gpu\"] = []\n gpu_names = {}\n\n # NVIDIA GPU information\n if gpu_type == \"nvidia\": \n nvmlInit()\n device_count = nvmlDeviceGetCount()\n for i in range(device_count):\n handle = nvmlDeviceGetHandleByIndex(i)\n name = nvmlDeviceGetName(handle)\n mem_info = nvmlDeviceGetMemoryInfo(handle)\n utilization = nvmlDeviceGetUtilizationRates(handle)\n\n # rename gpu if we have more than more of the same type\n if name in gpu_names:\n gpu_name = f\"{name} #{gpu_names[name]}\"\n gpu_names[name] += 1\n else:\n gpu_name = name\n gpu_names[name] = 2\n\n system_info[\"gpu\"].append({\n \"device\": f\"cuda:{i}\",\n \"name\": gpu_name,\n \"memory_total\": mem_info.total,\n \"memory_used\": mem_info.used,\n \"memory_free\": mem_info.free,\n \"gpu_utilization\": utilization.gpu,\n \"memory_utilization\": utilization.memory\n })\n\n nvmlShutdown()\n\n # rename multiple gpus\n for gpu in system_info[\"gpu\"]:\n if gpu[\"name\"] in gpu_names and gpu_names[gpu[\"name\"]] > 2:\n gpu[\"name\"] = f\"{gpu['name']} #1\" \n\n return system_info" }, { "identifier": "load_configs", "path": "application/system_info.py", "snippet": "def load_configs(base_dir, vault_client, vault_root, server_id, gpu_type):\n # Return data\n all_skills = []\n all_configs = {}\n all_models = []\n all_repos = []\n script_map = {}\n loaded_handlers = []\n\n # load custom skills\n custom_skill_map = {}\n custom_skills = [] \n try: \n filename = f\"data/{server_id}_custom.json\"\n with open(filename, 'r') as file:\n custom_skills = json.load(file)\n except FileNotFoundError:\n pass\n\n for custom_skill in custom_skills:\n golem_module_path = f\"modules/{custom_skill['golem_module']}\"\n if golem_module_path not in custom_skill_map:\n custom_skill_map[golem_module_path] = []\n\n custom_skill_map[golem_module_path].append(custom_skill) \n\n # Walk 
through the directory\n for dir_path, dir_names, file_names in os.walk(base_dir):\n\n # Check each file in the current directory\n for file_name in file_names:\n\n # If the file is not a golem.json file\n if file_name != \"golem.json\":\n continue\n\n # Construct the full path to the file\n full_path = os.path.join(dir_path, file_name)\n\n # Open the file and load the JSON\n with open(full_path, 'r') as f:\n config = json.load(f)\n\n # Save the loaded config to the dictionary\n script_path = os.path.join(dir_path, config[\"script\"])\n config[\"script_path\"] = script_path\n all_configs[dir_path] = config\n\n if \"supported_gpu\" in config and gpu_type not in config[\"supported_gpu\"]:\n logger.info(f\"skipping handler {config['label']}, gpu not supported\")\n continue\n\n if \"repository\" in config:\n for repo in config[\"repository\"]:\n all_repos.append(repo[\"folder\"])\n\n # If the \"skills\" key exists in the JSON, append its contents to the all_models array\n if \"skills\" in config:\n if dir_path in custom_skill_map:\n config[\"skills\"].extend(custom_skill_map[dir_path])\n\n loaded_handlers.append({\n \"unique_key\": config.get(\"unique_key\", \"\"),\n \"label\": config.get(\"label\", \"\"),\n \"description\": config.get(\"description\", \"\")\n })\n global_repos = config.get(\"repository\", [])\n global_configuration = config.get(\"configuration\", {}) # Get the global configuration\n global_config_dict = {option[\"name\"]: option[\"default\"] for option in global_configuration.get(\"options\", [])}\n vault_path = global_configuration.get(\"vault_path\", \"\") \n\n for skill in config[\"skills\"]: \n vault_data = {}\n if vault_path:\n try: \n config_path = f'{vault_root}/data/{vault_path}/{skill[\"routing_key\"]}'\n vault_data_resp = vault_client.read(path=config_path)\n vault_data = {} if vault_data_resp == None else vault_data_resp['data']['data']\n except Exception as e:\n pass # no need to log just means no override data has been set\n\n module_name = dir_path.split(\"modules/\")[1]\n skill[\"golem_module\"] = module_name\n skill[\"raw\"] = json.dumps(skill, indent=2)\n skill[\"handler_key\"] = config.get(\"unique_key\", \"\")\n skill_configuration = skill.get(\"configuration\", {})\n merged_config = {**global_config_dict, **skill_configuration, **vault_data} # Merge global, skill level and vault configurations\n skill[\"configuration\"] = merged_config # Replace the skill configuration with the merged configuration\n skill[\"configuration_template\"] = global_configuration.copy()\n skill[\"repository\"] = global_repos.copy() \n for repo in skill[\"repository\"]:\n repo[\"module_path\"] = dir_path\n \n skill[\"secrets\"] = {}\n\n if \"vault_path\" in skill[\"configuration_template\"]:\n skill[\"configuration_template\"][\"vault_path\"] = skill[\"configuration_template\"][\"vault_path\"] + \"/\" + skill[\"routing_key\"] \n\n skill[\"multi_gpu_support\"] = True if \"multi_gpu_support\" in config and config[\"multi_gpu_support\"] == True else False\n \n # protect sensetive data\n if \"options\" in skill[\"configuration_template\"]:\n for option in skill[\"configuration_template\"][\"options\"]: \n if option[\"type\"] == \"secret\":\n skill[\"secrets\"][option[\"name\"]] = merged_config[option[\"name\"]]\n merged_config[option[\"name\"]] = \"SECRET\"\n \n all_skills.append(skill) \n script_map[skill[\"routing_key\"]] = script_path\n\n if \"model\" not in skill:\n continue\n\n for model in skill[\"model\"]:\n if \"files\" in model:\n for file in model[\"files\"]:\n 
model_full_path = os.path.join(model[\"name\"], model[\"files\"][file])\n lock_file = hashlib.sha256(model_full_path.encode()).hexdigest()[:10] + \".lock\"\n all_models.append({\"path\": model_full_path, \"lock_file\": lock_file })\n if \"branch\" in model:\n for file in model[\"branch\"]:\n model_full_path = os.path.join(model[\"name\"], model[\"branch\"][file])\n lock_file = hashlib.sha256(model_full_path.encode()).hexdigest()[:10] + \".lock\"\n all_models.append({\"path\": model_full_path, \"lock_file\": lock_file })\n else:\n model_full_path = model[\"name\"]\n lock_file = hashlib.sha256(model_full_path.encode()).hexdigest()[:10] + \".lock\"\n all_models.append({\"path\": model_full_path, \"lock_file\": lock_file }) \n\n return all_skills, all_configs, all_models, all_repos, script_map, loaded_handlers" }, { "identifier": "load_enabled_skills", "path": "application/system_info.py", "snippet": "def load_enabled_skills(server_id: str) -> dict:\n # Check if the file exists\n if not os.path.exists(f'data/{server_id}_skills.json'):\n logger.info(f\"file data/{server_id}_skills.json does not exist\")\n return {}\n\n try:\n with open(f'data/{server_id}_skills.json', 'r') as f:\n enabled_skills = json.load(f)\n except json.JSONDecodeError:\n logger.info(f\"invalid json in data/{server_id}_skills.json\")\n return {}\n\n # Prepare an empty dictionary to hold valid skills\n enabled_skills_dict = {}\n\n # Define the expected keys and their data types\n expected_keys = {\"routing_key\": str, \"device\": str, \"use_precision\": str}\n\n for item in enabled_skills:\n # Check if item contains all expected keys, their values are of the correct data types,\n # and no additional keys are present\n if (set(item.keys()) == set(expected_keys.keys()) and\n all(isinstance(item[key], expected_keys[key]) for key in expected_keys)):\n\n if item['routing_key'] not in enabled_skills_dict:\n enabled_skills_dict[item['routing_key']] = []\n\n enabled_skills_dict[item['routing_key']].extend([item])\n else:\n logger.error(f\"tnvalid skill data: {item}\")\n\n return enabled_skills_dict" }, { "identifier": "connect_to_amqp", "path": "application/amqp.py", "snippet": "def connect_to_amqp(amqp_ip, amqp_user, amqp_password, amqp_vhost): \n\n # Otherwise, establish a new connection for this process\n connection_successful = True\n try:\n credentials = pika.PlainCredentials(amqp_user, amqp_password)\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(\n host=amqp_ip,\n virtual_host=amqp_vhost,\n credentials=credentials,\n connection_attempts=5,\n retry_delay=5,\n socket_timeout=600\n )\n )\n channel = connection.channel()\n \n except Exception as e:\n connection_successful = False\n logger.error(f\"failed to connect\", e)\n \n return connection_successful, connection, channel" }, { "identifier": "become_consumer", "path": "application/amqp.py", "snippet": "def become_consumer(channel, queue_name, callback_function):\n channel.basic_consume(queue=queue_name, on_message_callback=callback_function, auto_ack=False)\n channel.start_consuming()" }, { "identifier": "bind_queue_to_exchange", "path": "application/amqp.py", "snippet": "def bind_queue_to_exchange(channel, queue_name, exchange_name, routing_key=None):\n channel.queue_bind(exchange=exchange_name, queue=queue_name, routing_key=routing_key)" }, { "identifier": "create_exchange", "path": "application/amqp.py", "snippet": "def create_exchange(channel, exchange_name, exchange_type='direct'):\n channel.exchange_declare(exchange=exchange_name, 
exchange_type=exchange_type)" }, { "identifier": "create_queue", "path": "application/amqp.py", "snippet": "def create_queue(channel, queue_name, dlx=None, dlx_queue='deadletters', is_exclusive=False, is_auto_delete=False):\n \n # Declare the queue with 'dlx' as the DLX if provided\n if dlx:\n result = channel.queue_declare(queue=queue_name, exclusive=is_exclusive, auto_delete=is_auto_delete, arguments={\n 'x-dead-letter-exchange': dlx,\n 'x-dead-letter-routing-key': dlx_queue\n })\n else:\n result = channel.queue_declare(queue=queue_name, exclusive=is_exclusive, auto_delete=is_auto_delete)\n\n return result.method.queue" }, { "identifier": "send_message_to_exchange", "path": "application/amqp.py", "snippet": "def send_message_to_exchange(channel, exchange_name, routing_key, message, headers=None):\n properties = pika.BasicProperties(delivery_mode=2) # make message persistent\n if headers is not None:\n properties.headers = headers\n\n channel.basic_publish(exchange=exchange_name,\n routing_key=routing_key,\n body=message,\n properties=properties)" }, { "identifier": "start_worker_threads", "path": "application/thread.py", "snippet": "def start_worker_threads(all_skills, skills_config, amqp_params, script_map, server_id):\n \n # Iterate through pipelines in the config\n for skill in all_skills:\n routing_key = skill[\"routing_key\"]\n\n # Skip the pipeline if the name is not found in the devices_and_status_dict\n device_and_status = skills_config.get(routing_key)\n if device_and_status is None:\n continue\n\n for to_device in device_and_status:\n\n # Create a new process for each consumer\n stop_generation_event = multiprocessing.Event()\n stop_generation_filter = multiprocessing.Array(ctypes.c_char, 128)\n stop_event = multiprocessing.Event()\n thread_status = multiprocessing.Array(ctypes.c_char, 24)\n config_event = multiprocessing.Event()\n thread_config = multiprocessing.Array(ctypes.c_char, 4096)\n thread_status.raw = bytes(\"STARTING\", \"utf-8\")\n process = multiprocessing.Process(target=worker_thread, args=(amqp_params, stop_event, stop_generation_event, stop_generation_filter, \n thread_status, config_event, thread_config, to_device, skill, script_map, server_id))\n process.start()\n\n device = to_device[\"device\"]\n ram = skill[\"memory_usage\"][to_device[\"use_precision\"]]\n worker_threads.extend([{ \"process\": process, \n \"routing_key\": routing_key, \"device\": device, \"ram\": \n ram, \"use_precision\": to_device[\"use_precision\"], \"stop_event\": stop_event, \"stop_generation_event\": stop_generation_event,\n \"stop_generation_filter\": stop_generation_filter, \"thread_status\": thread_status, \"config_event\": config_event, \"thread_config\": thread_config}]) " }, { "identifier": "stop_worker_thread", "path": "application/thread.py", "snippet": "def stop_worker_thread(skill_details, amqp_channel):\n \n for i, thread in enumerate(worker_threads):\n if thread[\"routing_key\"] == skill_details[\"routing_key\"] and thread[\"device\"] == skill_details[\"device\"] and thread[\"use_precision\"] == skill_details[\"use_precision\"]: \n logger.info(f\"stopping thread for {skill_details['routing_key']}\") \n thread[\"thread_status\"].raw = bytes('\\0' * 24, 'utf-8') \n thread[\"thread_status\"].raw = bytes(\"STOPPING\", \"utf-8\") \n thread[\"stop_event\"].set()\n send_message_to_exchange(amqp_channel, \"golem_skill\", skill_details[\"routing_key\"], \"STOP\", None) \n while True:\n thread_string = bytes(thread[\"thread_status\"].raw).rstrip(b'\\x00').decode(\"utf-8\")\n if 
thread_string == \"STOPPED\":\n break\n\n thread[\"process\"].join()\n del worker_threads[i]\n return " }, { "identifier": "get_worker_threads", "path": "application/thread.py", "snippet": "def get_worker_threads():\n return worker_threads" }, { "identifier": "stop_all_threads", "path": "application/thread.py", "snippet": "def stop_all_threads(amqp_channel):\n \n for i, thread in enumerate(worker_threads):\n logger.info(f\"stopping thread for {thread['routing_key']}\") \n thread[\"thread_status\"].raw = bytes('\\0' * 24, 'utf-8') \n thread[\"thread_status\"].raw = bytes(\"STOPPING\", \"utf-8\") \n thread[\"stop_event\"].set()\n send_message_to_exchange(amqp_channel, \"golem_skill\", thread[\"routing_key\"], \"STOP\", None) \n while True:\n time.sleep(2)\n thread_string = bytes(thread[\"thread_status\"].raw).rstrip(b'\\x00').decode(\"utf-8\")\n if thread_string == \"STOPPED\":\n break\n\n thread[\"process\"].join()\n del worker_threads[i]" }, { "identifier": "update_thread_configuration", "path": "application/thread.py", "snippet": "def update_thread_configuration(vault_root, vault_client, vault_path):\n \n config_path = f'{vault_root}/data/{vault_path}'\n logger.info(f\"updating thread configuration for {config_path}\")\n vault_data_resp = vault_client.read(path=config_path)\n vault_data = {} if vault_data_resp == None else vault_data_resp['data']['data']\n path_parts = vault_path.split('/')\n unique_key = path_parts[-1]\n json_dump = json.dumps(vault_data)\n if len(json_dump) >= 4096:\n #logger.error(f\"error: configuraation json longer than buffer\")\n return {}\n \n for thread in worker_threads:\n if thread[\"routing_key\"] != unique_key:\n continue\n \n thread[\"thread_config\"].raw = bytes('\\0' * 4096, 'utf-8') \n thread[\"thread_config\"].raw = bytes(json_dump, \"utf-8\")\n thread[\"config_event\"].set()\n \n return vault_data" }, { "identifier": "stop_thread_generation", "path": "application/thread.py", "snippet": "def stop_thread_generation(stop_details):\n \n for i, thread in enumerate(worker_threads):\n if thread[\"routing_key\"] == stop_details[\"routing_key\"]: \n logger.info(f\"sending stop generation to {stop_details['routing_key']}\")\n thread[\"stop_generation_filter\"].raw = bytes('\\0' * 128, 'utf-8')\n thread[\"stop_generation_filter\"].raw = bytes(stop_details[\"socket_id\"], \"utf-8\") \n thread[\"stop_generation_event\"].set()\n return" } ]
import logging
import argparse
import time
import os
import json
import hashlib
import hvac
from typing import Dict
from application.download import install_skill
from application.system_info import get_system_info, load_configs, load_enabled_skills
from application.amqp import connect_to_amqp, become_consumer, bind_queue_to_exchange
from application.amqp import create_exchange, create_queue, send_message_to_exchange
from application.thread import start_worker_threads, stop_worker_thread, get_worker_threads, stop_all_threads, update_thread_configuration, stop_thread_generation
7,298
# create dead letter exchange and queue create_exchange(amqp_channel, 'deadletter') flx_queue = create_queue(channel=amqp_channel, queue_name='deadletters') bind_queue_to_exchange(amqp_channel, 'deadletters', 'deadletter') # create exchange and queue for this server create_exchange(amqp_channel, 'golem') create_exchange(amqp_channel, 'golem_broadcast', 'fanout') create_exchange(amqp_channel, 'arcane_bridge_broadcast', 'fanout') create_queue(channel=amqp_channel, queue_name=server_id, is_auto_delete=True, dlx="deadletter") bind_queue_to_exchange(amqp_channel, server_id, 'golem') bind_queue_to_exchange(amqp_channel, server_id, 'golem_broadcast') # start all the pipe threads create_exchange(amqp_channel, 'golem_skill') # define server call back for answering messages def server_callback(ch, method, properties, body): global all_skills, all_configs, all_models, all_repos, script_map, loaded_handlers if "command" not in properties.headers or "return_routing_key" not in properties.headers or "return_exchange" not in properties.headers: logger.info("command or return routing not found in header. command, return_route_key, and return_exchange are required headers") amqp_channel.basic_reject(delivery_tag=method.delivery_tag, requeue=False) return logger.info(f"incoming command {properties.headers['command']}") try: headers = {} command = properties.headers.get('command') return_key = properties.headers.get('return_routing_key') return_exchange = properties.headers.get('return_exchange') for key, value in properties.headers.items(): # Exclude return_exchange and return_routing_key if key not in ['return_exchange', 'return_routing_key', 'x-delay']: headers[key] = value if command == "system_info": installed_models, installed_repos, downloading_models = check_data_directories(all_models, all_repos) # get list of installed models system_info = get_system_info(server_id, args.gpu_type) system_info["server_id"] = server_id system_info["server_label"] = server_id.replace("_", "-") system_info["installed_models"] = installed_models system_info["downloading_models"] = downloading_models system_info["installed_repository"] = installed_repos system_info["handlers"] = loaded_handlers # protect secrets from the UI stripped_skills = [{k: v for k, v in skill.items() if k != "secrets"} for skill in all_skills] system_info["installed_skills"] = stripped_skills running_skills = [] system_info["status"] = "ONLINE" worker_threads = get_worker_threads() for thread in worker_threads: thread_status = thread["thread_status"].raw.decode().rstrip('\0') if thread_status != "ONLINE": system_info["status"] = "STARTING" running_skills.extend([{"device":thread["device"], "routing_key": thread["routing_key"], "ram": thread["ram"] * 1000000, "use_precision": thread["use_precision"], "thread_status": thread_status }]) system_info["running_skills"] = running_skills send_message_to_exchange(amqp_channel, return_exchange, return_key, json.dumps(system_info).encode(), headers) amqp_channel.basic_ack(delivery_tag=method.delivery_tag) return elif command == "run_skill": skill_details = json.loads(body) add_skill(skill_details, server_id) run_map = {skill_details["routing_key"]: [skill_details]} start_worker_threads(all_skills, run_map, amqp_params, script_map, server_id) amqp_channel.basic_ack(delivery_tag=method.delivery_tag) return elif command == "stop_skill": skill_details = json.loads(body) remove_skill(skill_details, server_id) stop_worker_thread(skill_details, amqp_channel) amqp_channel.basic_ack(delivery_tag=method.delivery_tag) 
return elif command == "install_skill": skill_details = json.loads(body) install_skill(all_skills, skill_details, args.shared_models, server_id, amqp_channel) amqp_channel.basic_ack(delivery_tag=method.delivery_tag) logger.info('skill installing 🚀') return elif command == "custom_skill": skill_details = json.loads(body) install_custom_skill(skill_details, server_id) all_skills, all_configs, all_models, all_repos, script_map, loaded_handlers = load_configs('modules', vault_client, args.vault_root, server_id) amqp_channel.basic_ack(delivery_tag=method.delivery_tag) logger.info('custom skill installed 🚀') return elif command == "stop_generation": stop_details = json.loads(body) stop_thread_generation(stop_details) logger.info(stop_details) amqp_channel.basic_ack(delivery_tag=method.delivery_tag) logger.info('generation stopped 🛑') return elif command == "update_configuration": details = json.loads(body) vault_data = update_thread_configuration(args.vault_root, vault_client, details["vault_path"]) for skill in all_skills: if "configuration_template" in skill and "vault_path" in skill["configuration_template"] and skill["configuration_template"]["vault_path"] == details["vault_path"]: current_config = skill["configuration"] merged_config = {**current_config, **vault_data} skill["configuration"] = merged_config amqp_channel.basic_ack(delivery_tag=method.delivery_tag) logger.info('configuration updated 🔧') return except Exception as e: logger.error("an error occurred:", e) amqp_channel.basic_reject(delivery_tag=method.delivery_tag, requeue=False) logger.info(f"command {properties.headers['command']} not found") amqp_channel.basic_reject(delivery_tag=method.delivery_tag, requeue=False)
pika_logger = logging.getLogger("pika") pika_logger.setLevel(logging.WARNING) logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) # local modules # checks what models are installed on the system def check_data_directories(all_models, all_repos): # make sure dirs are present os.makedirs("data/models", exist_ok=True) os.makedirs("data/loras", exist_ok=True) os.makedirs("data/repos", exist_ok=True) # list of installed models available_models = [] downloading_models = [] for model_data in all_models: model_name = model_data["path"] model_directory = f"data/models/{model_name}" lock_file_path = f'data/models/{model_data["lock_file"]}' if os.path.exists(lock_file_path): downloading_models.append(model_name) elif os.path.exists(model_directory): available_models.append(model_name) # list of insalled repors available_repos = [] for repo_name in all_repos: repo_directory = f"data/repos/{repo_name}" if os.path.exists(repo_directory): available_repos.append(repo_name) return available_models, available_repos, downloading_models def add_skill(new_skill: dict, server_id: str): # load existing skills enabled_skills = load_enabled_skills(server_id) # flatten the list of lists into a single list flattened_skills = [skill for sublist in enabled_skills.values() for skill in sublist] # expected keys expected_keys = {"routing_key": str, "device": str, "use_precision": str} # check if new_skill is valid if (set(new_skill.keys()) == set(expected_keys.keys()) and all(isinstance(new_skill[key], expected_keys[key]) for key in expected_keys)): # add new skill to the list flattened_skills.append(new_skill) # save back to file with open(f'data/{server_id}_skills.json', 'w') as f: json.dump(flattened_skills, f) else: logger.info(f"invalid skill data: {new_skill}") def remove_skill(skill_to_remove: dict, server_id: str): # load existing skills enabled_skills = load_enabled_skills(server_id) # flatten the list of lists into a single list flattened_skills = [skill for sublist in enabled_skills.values() for skill in sublist] # iterate over the skills to find the first match and remove it for i, skill in enumerate(flattened_skills): if skill == skill_to_remove: del flattened_skills[i] break # save back to file with open(f'data/{server_id}_skills.json', 'w') as f: json.dump(flattened_skills, f) def fatal_error(error): logger.error(error) time.sleep(3) os._exit(0) def install_custom_skill(skill_details, server_id): data = [] filename = f"data/{server_id}_custom.json" if os.path.exists(filename): with open(filename, 'r') as file: data = json.load(file) if not isinstance(data, list): raise TypeError("The file must contain a JSON array.") data.append(skill_details) with open(filename, 'w') as file: json.dump(data, file, indent=4) def connect_to_vault(vault_url, vault_token_file, vault_root, max_retries=10, sleep_time=8): for retry in range(max_retries): try: # Read the Vault token from file if os.path.isfile(args.vault_token_file): # Read the Vault token from file with open(vault_token_file, 'r') as file: vault_token = file.read().strip() logger.info(f"vault connecting to {vault_url}, vhost: {vault_root}") vault_client = hvac.Client(url=vault_url, token=vault_token, namespace=vault_root) vault_connected = vault_client.is_authenticated() if vault_connected: # give a bit of time for amqp to write its creds vault_data_resp = vault_client.read(path=f'{vault_root}/data/core/amqp') if vault_data_resp == None: raise ValueError('invalid response from vault server') vault_data = vault_data_resp['data']['data'] # 
Check if all required fields are present required_fields = ['host', 'username', 'password', 'vhost'] if not all(field in vault_data for field in required_fields): missing_fields = [field for field in required_fields if field not in vault_data] raise ValueError(missing_fields) logger.info('successfully connected to vault server') return vault_client, vault_data else: logger.info(f"waiting for token file creation") except Exception as e: print(e) pass time.sleep(sleep_time) logger.info(f"retrying connection to vault server. attempt {retry+1}/{max_retries}") # If connection is not successful after max_retries fatal_error('unable to connect to vault server after multiple attempts.') if __name__ == "__main__": logger.info("starting elemental golem") # Parse command-line arguments parser = argparse.ArgumentParser(description='Vault creds') parser.add_argument('--server-id', required=True, help='Unique server ID') parser.add_argument('--vault-host', required=True, help='Vault server host address') parser.add_argument('--vault-token-file', help='Path to the Vault token file', default='./vault-token') parser.add_argument('--vault-root', help='Root path in the Vault server', default='spellbook') parser.add_argument('--amqp-ip', help='Overrides what is stored in Vault for the amqp ip.') parser.add_argument('--shared-models', required=False, help='Show be set to true is the data/ folder is shared between golem instances or in a docker container.', default=False, type=bool) parser.add_argument('--gpu-type', help='The type of GPU the system has onboard', default='nvidia', choices=['nvidia', 'nogpu']) args = parser.parse_args() vault_client, vault_data = connect_to_vault(args.vault_host, args.vault_token_file, args.vault_root) # connect to amqp amqp_ip = args.amqp_ip if args.amqp_ip != None else vault_data['host'] amqp_params = { 'amqp_ip': amqp_ip, 'amqp_user': vault_data['username'], 'amqp_password': vault_data['password'], 'amqp_vhost': vault_data['vhost'] } server_name = args.server_id server_id = 'golem_' + hashlib.sha256(server_name.encode()).hexdigest()[:10] # load config files all_skills, all_configs, all_models, all_repos, script_map, loaded_handlers = load_configs('modules', vault_client, args.vault_root, server_id, args.gpu_type) # load enabled models json tp dict enabled_skills_dict = load_enabled_skills(server_id) # start threads start_worker_threads(all_skills, enabled_skills_dict, amqp_params, script_map, server_id) # connect to rabbit mq amqp_connected, amqp_connection, amqp_channel = connect_to_amqp(**amqp_params) if amqp_connected == False: fatal_error('unable to connect to amqp server') # create dead letter exchange and queue create_exchange(amqp_channel, 'deadletter') flx_queue = create_queue(channel=amqp_channel, queue_name='deadletters') bind_queue_to_exchange(amqp_channel, 'deadletters', 'deadletter') # create exchange and queue for this server create_exchange(amqp_channel, 'golem') create_exchange(amqp_channel, 'golem_broadcast', 'fanout') create_exchange(amqp_channel, 'arcane_bridge_broadcast', 'fanout') create_queue(channel=amqp_channel, queue_name=server_id, is_auto_delete=True, dlx="deadletter") bind_queue_to_exchange(amqp_channel, server_id, 'golem') bind_queue_to_exchange(amqp_channel, server_id, 'golem_broadcast') # start all the pipe threads create_exchange(amqp_channel, 'golem_skill') # define server call back for answering messages def server_callback(ch, method, properties, body): global all_skills, all_configs, all_models, all_repos, script_map, loaded_handlers if 
"command" not in properties.headers or "return_routing_key" not in properties.headers or "return_exchange" not in properties.headers: logger.info("command or return routing not found in header. command, return_route_key, and return_exchange are required headers") amqp_channel.basic_reject(delivery_tag=method.delivery_tag, requeue=False) return logger.info(f"incoming command {properties.headers['command']}") try: headers = {} command = properties.headers.get('command') return_key = properties.headers.get('return_routing_key') return_exchange = properties.headers.get('return_exchange') for key, value in properties.headers.items(): # Exclude return_exchange and return_routing_key if key not in ['return_exchange', 'return_routing_key', 'x-delay']: headers[key] = value if command == "system_info": installed_models, installed_repos, downloading_models = check_data_directories(all_models, all_repos) # get list of installed models system_info = get_system_info(server_id, args.gpu_type) system_info["server_id"] = server_id system_info["server_label"] = server_id.replace("_", "-") system_info["installed_models"] = installed_models system_info["downloading_models"] = downloading_models system_info["installed_repository"] = installed_repos system_info["handlers"] = loaded_handlers # protect secrets from the UI stripped_skills = [{k: v for k, v in skill.items() if k != "secrets"} for skill in all_skills] system_info["installed_skills"] = stripped_skills running_skills = [] system_info["status"] = "ONLINE" worker_threads = get_worker_threads() for thread in worker_threads: thread_status = thread["thread_status"].raw.decode().rstrip('\0') if thread_status != "ONLINE": system_info["status"] = "STARTING" running_skills.extend([{"device":thread["device"], "routing_key": thread["routing_key"], "ram": thread["ram"] * 1000000, "use_precision": thread["use_precision"], "thread_status": thread_status }]) system_info["running_skills"] = running_skills send_message_to_exchange(amqp_channel, return_exchange, return_key, json.dumps(system_info).encode(), headers) amqp_channel.basic_ack(delivery_tag=method.delivery_tag) return elif command == "run_skill": skill_details = json.loads(body) add_skill(skill_details, server_id) run_map = {skill_details["routing_key"]: [skill_details]} start_worker_threads(all_skills, run_map, amqp_params, script_map, server_id) amqp_channel.basic_ack(delivery_tag=method.delivery_tag) return elif command == "stop_skill": skill_details = json.loads(body) remove_skill(skill_details, server_id) stop_worker_thread(skill_details, amqp_channel) amqp_channel.basic_ack(delivery_tag=method.delivery_tag) return elif command == "install_skill": skill_details = json.loads(body) install_skill(all_skills, skill_details, args.shared_models, server_id, amqp_channel) amqp_channel.basic_ack(delivery_tag=method.delivery_tag) logger.info('skill installing 🚀') return elif command == "custom_skill": skill_details = json.loads(body) install_custom_skill(skill_details, server_id) all_skills, all_configs, all_models, all_repos, script_map, loaded_handlers = load_configs('modules', vault_client, args.vault_root, server_id) amqp_channel.basic_ack(delivery_tag=method.delivery_tag) logger.info('custom skill installed 🚀') return elif command == "stop_generation": stop_details = json.loads(body) stop_thread_generation(stop_details) logger.info(stop_details) amqp_channel.basic_ack(delivery_tag=method.delivery_tag) logger.info('generation stopped 🛑') return elif command == "update_configuration": details = 
json.loads(body) vault_data = update_thread_configuration(args.vault_root, vault_client, details["vault_path"]) for skill in all_skills: if "configuration_template" in skill and "vault_path" in skill["configuration_template"] and skill["configuration_template"]["vault_path"] == details["vault_path"]: current_config = skill["configuration"] merged_config = {**current_config, **vault_data} skill["configuration"] = merged_config amqp_channel.basic_ack(delivery_tag=method.delivery_tag) logger.info('configuration updated 🔧') return except Exception as e: logger.error("an error occurred:", e) amqp_channel.basic_reject(delivery_tag=method.delivery_tag, requeue=False) logger.info(f"command {properties.headers['command']} not found") amqp_channel.basic_reject(delivery_tag=method.delivery_tag, requeue=False)
become_consumer(amqp_channel, server_id, server_callback)
5
2023-11-06 19:03:07+00:00
12k
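The record above organises its server around a few pika helpers (create_exchange, create_queue, bind_queue_to_exchange, become_consumer) plus a header-driven callback. Below is a minimal, self-contained sketch of that consume/dispatch pattern using pika directly; the broker address, credentials, and the exchange/queue names are placeholders, and a real deployment would pull them from Vault as the record does.

import json
import pika

# placeholder connection details -- the record reads the real ones from Vault
credentials = pika.PlainCredentials("guest", "guest")
connection = pika.BlockingConnection(
    pika.ConnectionParameters(host="localhost", virtual_host="/", credentials=credentials))
channel = connection.channel()

# a direct exchange and a per-server queue bound to it, as in the record's setup block
channel.exchange_declare(exchange="golem", exchange_type="direct")
channel.queue_declare(queue="golem_demo", auto_delete=True)
channel.queue_bind(exchange="golem", queue="golem_demo")

def server_callback(ch, method, properties, body):
    # dispatch on the "command" header; ack on success, reject without requeue otherwise
    command = (properties.headers or {}).get("command")
    if command == "system_info":
        payload = json.loads(body) if body else {}
        print("system_info requested:", payload)
        ch.basic_ack(delivery_tag=method.delivery_tag)
    else:
        ch.basic_reject(delivery_tag=method.delivery_tag, requeue=False)

channel.basic_consume(queue="golem_demo", on_message_callback=server_callback, auto_ack=False)
channel.start_consuming()  # blocks until the process is interrupted

Replies in the record travel the same way messages arrive: the callback republishes a JSON body to the return_exchange and return_routing_key taken from the incoming headers.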
m4rkw/monzo-utils
monzo_utils/lib/monzo_sync.py
[ { "identifier": "Config", "path": "monzo_utils/lib/config.py", "snippet": "class Config(metaclass=Singleton):\n\n def __init__(self, config=None, config_path=None):\n if config_path is None:\n homedir = pwd.getpwuid(os.getuid()).pw_dir\n config_path = f\"{homedir}/.monzo\"\n\n if not os.path.exists(config_path):\n os.mkdir(config_path, 0o755)\n\n self.config_file = f\"{config_path}/config.yaml\"\n\n if config:\n self.config = config\n else:\n if not os.path.exists(self.config_file):\n sys.stderr.write(f\"config file not found: {self.config_file}, run setup first.\\n\")\n sys.exit(1)\n\n self.config = yaml.safe_load(open(self.config_file).read())\n\n\n def __getattr__(self, name):\n if name in self.config:\n return self.config[name]\n\n return object.__getattribute__(self, name)\n\n\n def set(self, key, value):\n self.config[key] = value\n\n\n @property\n def keys(self):\n return self.config.keys()\n\n\n def save(self):\n with open(self.config_file, 'w') as f:\n f.write(yaml.dump(self.config))" }, { "identifier": "DB", "path": "monzo_utils/lib/db.py", "snippet": "class DB(metaclass=Singleton):\n\n def __init__(self, db_config=None, config_path=None):\n if db_config:\n self.config = db_config\n else:\n self.config = Config(None, config_path).db\n\n self.driver = getattr(importlib.import_module(f\"monzo_utils.lib.db_driver.{self.config['driver']}\"), self.config['driver'])(self.config)\n\n self.columns = {}\n\n\n def __getattr__(self, name):\n match = re.match('^find_([\\w]+)_by_(.*?)$', name)\n\n if match:\n table = match.group(1)\n\n if table[0:4] == 'all_':\n table = table[4:]\n find_all = True\n else:\n find_all = False\n\n fields = match.group(2).split('_and_')\n\n def find_object_by_fields(*args, **kwargs):\n sql = \"select * from `\" + table + \"` where (\"\n\n sql_args = []\n\n for i in range(0, len(fields)):\n if i >0:\n sql += \" and \"\n\n if type(args[i]) == list:\n sql += \"(\"\n for j in range(0, len(args[i])):\n if j >0:\n sql += \" or \"\n\n if 'search' in kwargs and type(kwargs['search']) == list and fields[i] in kwargs['search']:\n sql += f\"`{fields[i]}` like %s\"\n sql_args.append('%' + args[i][j] + '%')\n else:\n sql += f\"`{fields[i]}` = %s\"\n sql_args.append(args[i][j])\n\n sql += \")\"\n else:\n if 'search' in kwargs and type(kwargs['search']) == list and fields[i] in kwargs['search']:\n sql += \"`\" + fields[i] + \"` like %s\"\n sql_args.append('%' + args[i] + '%')\n else:\n sql += \"`\" + fields[i] + \"` = %s\"\n sql_args.append(args[i])\n\n sql += \")\"\n\n if 'where' in kwargs:\n for where_clause in kwargs['where']:\n sql += f\" and {where_clause['clause']}\"\n\n if 'params' in where_clause:\n sql_args += where_clause['params']\n\n if 'orderby' in kwargs:\n sql += f\" order by {kwargs['orderby']}\"\n\n if 'orderdir' in kwargs:\n sql += f\" {kwargs['orderdir']}\"\n\n if 'limit' in kwargs:\n sql += f\" limit {kwargs['limit']}\"\n\n if find_all:\n return self.query(sql, sql_args)\n else:\n return self.one(sql, sql_args)\n\n return find_object_by_fields\n else:\n print(\"DB class method missing: %s\" % (name))\n sys.exit(1)\n\n\n def json_params(self, params):\n json_params = []\n\n for param in params:\n if type(param) == datetime.date:\n json_params.append(param.strftime('%Y-%M-%d'))\n elif type(param) == datetime.datetime:\n json_params.append(param.strftime('%Y-%M-%d %H:%M:%S'))\n else:\n json_params.append(param)\n\n return json_params\n\n\n def query(self, sql, params=[]):\n if 'DEBUG' in os.environ and os.environ['DEBUG'] == '1':\n print(\"SQL: %s\" % 
(sql))\n print(\"PARAMS: %s\" % (json.dumps(self.json_params(params),indent=4)))\n\n result = self.driver.query(sql, params)\n\n if type(result) == list:\n rows = []\n\n for row in result:\n rows.append(self.fix_dates(row))\n\n result = rows\n\n return result\n\n\n def fix_dates(self, row):\n fixed_row = {}\n\n for key in row:\n if type(row[key]) == str:\n m = re.match('^([\\d]{4})-([\\d]{2})-([\\d]{2})$', row[key])\n\n if m:\n fixed_row[key] = datetime.date(int(m.group(1)), int(m.group(2)), int(m.group(3)))\n continue\n\n m = re.match('^([\\d]{4})-([\\d]{2})-([\\d]{2}) ([\\d]{2}):([\\d]{2}):([\\d]{2})$', row[key])\n\n if m:\n fixed_row[key] = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4)), int(m.group(5)), int(m.group(6)))\n continue\n\n fixed_row[key] = row[key]\n\n return fixed_row\n\n\n def one(self, sql, params=[]):\n rows = self.query(sql, params)\n\n if len(rows) >0:\n return rows[0]\n\n return False\n\n\n def find(self, table):\n self.query_table = table\n self.sel = []\n self.whereClauses = []\n self.whereParams = []\n self.andWhereClauses = []\n self._orderBy = None\n self._orderDir = None\n self._join = []\n self._leftJoin = []\n self._groupBy = None\n\n return self\n\n\n def select(self, select):\n self.sel.append(select)\n\n return self\n\n\n def where(self, where, whereParams):\n self.whereClauses.append(where)\n self.whereParams += whereParams\n\n return self\n\n\n def andWhere(self, where, whereParams):\n self.andWhereClauses.append(where)\n self.whereParams += whereParams\n\n return self\n\n\n def orderBy(self, field, direction='asc'):\n self._orderBy = field\n self._orderDir = direction\n\n return self\n\n\n def join(self, join_table, join_left_col, join_right_col=None):\n if join_right_col:\n self._join.append({\n 'table': join_table,\n 'join_left_col': join_left_col,\n 'join_right_col': join_right_col\n })\n else:\n self._join.append({\n 'table': join_table,\n 'clause': join_left_col\n })\n\n return self\n\n\n def leftJoin(self, join_table, join_left_col, join_right_col, where=None):\n self._leftJoin.append({\n 'table': join_table,\n 'join_left_col': join_left_col,\n 'join_right_col': join_right_col,\n 'where': where\n })\n\n return self\n\n\n def orWhere(self, whereClause, whereParams=[]):\n self.whereType = 'or'\n\n return self.where(whereClause, whereParams)\n\n\n def groupBy(self, groupBy):\n self._groupBy = groupBy\n\n return self\n\n\n def prepare(self):\n if self.sel == []:\n select = '*'\n else:\n select = ''\n\n for i in range(0, len(self.sel)):\n if i >0:\n select += ','\n select += f\"{self.sel[i]}\"\n\n sql = \"select \" + select + \" from `\" + self.query_table + \"`\"\n\n for join in self._join:\n sql += \" join `\" + join['table'] + \"` on \"\n\n if 'clause' in join:\n sql += join['clause']\n else:\n sql += join['join_left_col'] + \" = \" + join['join_right_col']\n\n for join in self._leftJoin:\n sql += \" left join `\" + join['table'] + \"` on \"\n\n if 'clause' in join:\n sql += join['clause']\n else:\n sql += join['join_left_col'] + \" = \" + join['join_right_col']\n\n if len(self.whereClauses) >0:\n sql += \" where (\"\n\n for i in range(0, len(self.whereClauses)):\n if i >0:\n sql += \" or \"\n sql += self.whereClauses[i]\n\n sql += \")\"\n\n for i in range(0, len(self.andWhereClauses)):\n sql += \" and (\" + self.andWhereClauses[i] + \") \"\n\n if self._groupBy:\n sql += \" group by \" + self._groupBy\n\n if self._orderBy:\n sql += \" order by \"\n order_by_fields = self._orderBy.split(',')\n\n for i in range(0, 
len(order_by_fields)):\n if i >0:\n sql += \",\"\n sql += f\" `{order_by_fields[i].strip()}`\"\n\n if self._orderDir:\n sql += \" \" + self._orderDir\n\n return sql\n\n\n def getone(self):\n sql = self.prepare() + \" limit 1\"\n\n return self.one(sql, self.whereParams)\n\n\n def getall(self):\n rows = []\n\n for row in self.query(self.prepare(), self.whereParams):\n rows.append(row)\n\n return rows\n\n\n def get_raw_query(self):\n sql = self.prepare()\n\n raw_sql = ''\n\n n = 0\n skip = False\n\n for i in range(0, len(sql)):\n if skip:\n skip = False\n continue\n\n if sql[i:i+2] == '%s':\n raw_sql += \"'\" + self.whereParams[n] + \"'\"\n n += 1\n skip = True\n else:\n raw_sql += sql[i]\n\n return raw_sql\n\n\n def update(self, table, _id, data):\n if table not in self.columns:\n self.columns[table] = self.driver.get_columns(table, exclude=['id'])\n\n sql = f\"update `{table}` set\"\n params = []\n\n for i in range(0, len(self.columns[table])):\n if i >0:\n sql += \", \"\n\n sql += f\" `{self.columns[table][i]}` = %s\"\n params.append(data[self.columns[table][i]] if self.columns[table][i] in data else None)\n\n sql += f\" where `id` = %s\"\n params.append(_id)\n\n self.query(sql, params)\n\n\n def create(self, table, data):\n if table not in self.columns:\n self.columns[table] = self.driver.get_columns(table, exclude=['id'])\n\n sql = f\"insert into `{table}` (\"\n params = []\n\n for i in range(0, len(self.columns[table])):\n if i >0:\n sql += \",\"\n\n sql += f\"`{self.columns[table][i]}`\"\n params.append(data[self.columns[table][i]] if self.columns[table][i] in data else None)\n\n sql += f\") VALUES (\"\n\n for i in range(0, len(self.columns[table])):\n if i >0:\n sql += \",\"\n sql += \"%s\"\n\n sql += \")\"\n\n return self.query(sql, params)" }, { "identifier": "Log", "path": "monzo_utils/lib/log.py", "snippet": "class Log(metaclass=Singleton):\n\n def __init__(self):\n homedir = pwd.getpwuid(os.getuid()).pw_dir\n self.logfile = f\"{homedir}/.monzo/logfile\"\n\n\n def info(self, message):\n self.log(inspect.currentframe().f_code.co_name, message)\n\n\n def warning(self, message):\n self.log(inspect.currentframe().f_code.co_name, message)\n\n\n def error(self, message):\n self.log(inspect.currentframe().f_code.co_name, message)\n\n\n def fatal(self, message):\n self.log(inspect.currentframe().f_code.co_name, message)\n\n\n def log(self, level, message):\n log_line = \"%s: %s - %s\\n\" % (\n datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n level.upper(),\n message\n )\n\n with open(self.logfile, 'a+') as f:\n f.write(log_line)\n\n if sys.stdin.isatty():\n if level == 'info':\n sys.stdout.write(log_line)\n sys.stdout.flush()\n else:\n sys.stderr.write(log_line)\n sys.stderr.flush()\n\n self.rotate()\n\n\n def rotate(self):\n if os.stat(self.logfile).st_size >= MAX_SIZE_MB * 1024 * 1024:\n for i in reversed(list(range(1, MAX_FILES))):\n filename = '%s.%d' % (self.logfile, i)\n next_filename = '%s.%d' % (self.logfile, i+1)\n\n if i+1 == MAX_FILES:\n if os.path.exists(filename):\n os.remove(filename)\n else:\n if os.path.exists(filename):\n os.rename(filename, next_filename)\n\n if os.path.exists(self.logfile):\n os.rename(self.logfile, '%s.1' % (self.logfile))" }, { "identifier": "MonzoAPI", "path": "monzo_utils/lib/monzo_api.py", "snippet": "class MonzoAPI:\n\n def __init__(self):\n homedir = pwd.getpwuid(os.getuid()).pw_dir\n monzo_dir = f\"{homedir}/.monzo\"\n self.token_file = f\"{monzo_dir}/tokens\"\n\n self.load_tokens()\n\n self.client = self.get_client()\n\n\n def 
load_tokens(self):\n if os.path.exists(self.token_file):\n data = json.loads(open(self.token_file).read())\n\n self.access_token = data['access_token']\n self.access_token_expiry = data['expiry']\n self.refresh_token = data['refresh_token']\n else:\n self.authenticate()\n\n \n def authenticate(self):\n client = Authentication(\n client_id=Config().client_id,\n client_secret=Config().client_secret,\n redirect_url=Config().redirect_url\n )\n\n if not sys.stdout.isatty():\n if 'email' in Config().keys:\n os.system(\"echo '%s'| mail -s 'Monzo auth required' '%s'\" % (client.authentication_url, Config().email))\n Log().error('Authentication required, unable to sync.')\n sys.exit(1)\n\n print(\"\\nAuthentication required, check email or visit:\\n\")\n print(client.authentication_url)\n\n if os.path.exists(Config().oauth_token_file):\n os.remove(Config().oauth_token_file)\n\n while not os.path.exists(Config().oauth_token_file):\n time.sleep(1)\n\n data = json.loads(open(Config().oauth_token_file).read().rstrip())\n\n os.remove(Config().oauth_token_file)\n\n try:\n client.authenticate(authorization_token=data['token'], state_token=data['state'])\n except MonzoAuthenticationError:\n Log().error('State code does not match')\n exit(1)\n except MonzoServerError:\n Log().error('Monzo Server Error')\n exit(1)\n\n self.access_token = client.access_token\n self.access_token_expiry = client.access_token_expiry\n self.refresh_token = client.refresh_token\n\n self.save_tokens()\n\n self.client = self.get_client()\n\n print(\"\\nwaiting for authorisation...\")\n\n while 1:\n time.sleep(1)\n\n try:\n self.accounts()\n break\n except MonzoPermissionsError:\n pass\n\n\n def save_tokens(self):\n with open(self.token_file,'w') as f:\n f.write(json.dumps({\n 'access_token': self.access_token,\n 'expiry': self.access_token_expiry,\n 'refresh_token': self.refresh_token\n }))\n\n\n def get_client(self):\n return Authentication(\n client_id=Config().client_id,\n client_secret=Config().client_secret,\n redirect_url=Config().redirect_url,\n access_token=self.access_token,\n access_token_expiry=self.access_token_expiry,\n refresh_token=self.refresh_token\n )\n\n\n def account(self, account_id):\n return monzo.endpoints.account.Account.fetch(self.client, account_id=account_id)\n\n\n def accounts(self, first=True):\n for i in range(0, 3):\n try:\n accounts = monzo.endpoints.account.Account.fetch(self.client)\n\n self.update_tokens()\n\n return accounts\n\n except MonzoHTTPError:\n if first:\n if 'NO_AUTH' in os.environ:\n raise Exception(\"token expired\")\n\n self.authenticate()\n\n return self.accounts(False)\n\n Log().error('auth failed')\n sys.exit(1)\n except MonzoAuthenticationError:\n if first:\n self.authenticate()\n\n return self.accounts(False)\n\n Log().error(\"auth failed\")\n sys.exit(1)\n except MonzoServerError:\n Log().error(\"server error\")\n\n if i == 2:\n sys.exit(1)\n\n time.sleep(5)\n\n except TimeoutError:\n Log().error(\"timeout\")\n\n if i == 2:\n sys.exit(1)\n\n time.sleep(5)\n\n raise Exception(\"failed to retrieve accounts after 3 attempts\")\n\n\n def update_tokens(self):\n if self.access_token == self.client.access_token and \\\n self.access_token_expiry == self.client.access_token_expiry and \\\n self.refresh_token == self.client.refresh_token:\n return\n\n self.access_token = self.client.access_token\n self.access_token_expiry = self.client.access_token_expiry\n self.refresh_token = self.client.refresh_token\n\n self.save_tokens()\n\n\n def transactions(self, account_id, days=3):\n error = 
None\n\n now = datetime.datetime.utcnow()\n since = now - datetime.timedelta(days=days)\n\n for i in range(0, 3):\n try:\n return monzo.endpoints.transaction.Transaction.fetch(self.client, account_id=account_id, expand=['merchant'], since=since)\n except MonzoPermissionsError as e:\n raise e\n except Exception as e:\n error = str(e)\n\n if i != 2:\n time.sleep(5)\n else:\n raise e\n\n Log().error(\"failed to retrieve transactions: %s\" % (error))\n sys.exit(1)\n\n\n def pots(self, account_id, first=True):\n try:\n pots = monzo.endpoints.pot.Pot.fetch(self.client, account_id=account_id)\n except MonzoHTTPError:\n if first:\n if 'NO_AUTH' in os.environ:\n raise Exception(\"token expired\")\n\n self.authenticate()\n self.client = self.get_client()\n\n return self.pots(account_id, False)\n\n Log().error(\"auth failed\")\n sys.exit(1)\n except MonzoAuthenticationError:\n if first:\n self.authenticate()\n self.client = self.get_client()\n\n return self.pots(account_id, False)\n\n Log().error(\"auth failed\")\n sys.exit(1)\n except TimeoutError:\n Log().error(\"timeout\")\n sys.exit(1)\n\n return pots\n\n\n def withdraw_credit(self, account_id, pot, credit):\n self.load_tokens()\n\n self.client = self.get_client()\n\n pot = monzo.endpoints.pot.Pot.fetch_single(self.client, account_id=account_id, pot_id=pot.pot_id)\n\n dedupe_code = '%s_%s' % (\n pot.pot_id,\n datetime.datetime.now().strftime('%Y%m%d%H')\n )\n\n amount = round(credit * 100)\n\n for i in range(0, 3):\n try:\n monzo.endpoints.pot.Pot.withdraw(self.client, pot=pot, account_id=account_id, amount=amount, dedupe_id=dedupe_code)\n return True\n except Exception as e:\n print(\"failed to withdraw pot money: %s\" % (str(e)))\n\n if i <2:\n time.sleep(3)\n\n return False\n\n\n def deposit_to_pot(self, account_id, pot, shortfall):\n self.load_tokens()\n\n self.client = self.get_client()\n\n pot = monzo.endpoints.pot.Pot.fetch_single(self.client, account_id=account_id, pot_id=pot.pot_id)\n\n dedupe_code = '%s_%s' % (\n pot.pot_id,\n datetime.datetime.now().strftime('%Y%m%d%H')\n )\n\n amount = round(shortfall * 100)\n\n for i in range(0, 3):\n try:\n monzo.endpoints.pot.Pot.deposit(self.client, pot=pot, account_id=account_id, amount=amount, dedupe_id=dedupe_code)\n return True\n except Exception as e:\n print(\"failed to deposit pot money: %s\" % (str(e)))\n\n if i <2:\n time.sleep(3)\n\n return False" }, { "identifier": "Provider", "path": "monzo_utils/model/provider.py", "snippet": "class Provider(BaseModel):\n\n def accounts(self, orderby='name', orderdir='asc', limit=None, order=None):\n accounts = super().related('Account', 'provider_id', self.id, orderby, orderdir, limit)\n\n # return accounts in a specific order\n if order:\n sorted_accounts = []\n\n for account_name in order:\n for account in accounts:\n if account.name == account_name:\n sorted_accounts.append(account)\n break\n\n return sorted_accounts\n\n return accounts" }, { "identifier": "Account", "path": "monzo_utils/model/account.py", "snippet": "class Account(BaseModel):\n\n DISPLAY_KEYS = ['name','sortcode','account_no','balance','available']\n\n\n def __init__(self, attrs={}):\n super().__init__(attrs)\n\n\n def transactions(self, orderby='created_at', orderdir='asc', limit=None):\n return super().related('Transaction', 'account_id', self.id, orderby, orderdir, limit)\n\n\n def pots(self, orderby='name', orderdir='asc', limit=None):\n return super().related('Pot', 'account_id', self.id, orderby, orderdir, limit, deleted=0)\n\n\n @property\n def __dict__(self):\n attrs = 
{'attrs': self.attrs}\n\n for pot in self.pots(orderby='name'):\n attrs['attrs'][pot.name] = pot.balance\n\n return attrs\n\n\n @property\n def keys(self):\n keys = []\n\n for key in self.DISPLAY_KEYS.copy():\n if '-t' in sys.argv and ((key == 'sortcode' and self.sortcode is None) or \\\n (key == 'account_no' and self.account_no is None)):\n continue\n\n keys.append(key)\n\n for pot in self.pots(orderby='name'):\n if pot.name not in keys:\n keys.append(pot.name)\n\n return keys\n\n\n def last_salary_transaction(self, description, payment_day, salary_minimum):\n return DB().find_transaction_by_account_id_and_declined_and_description(\n self.id,\n 0,\n description,\n orderby='created_at',\n orderdir='desc',\n limit=1,\n search=['description'],\n where=[{\n 'clause': 'money_in >= %s',\n 'params': [salary_minimum]\n }]\n )" }, { "identifier": "Merchant", "path": "monzo_utils/model/merchant.py", "snippet": "class Merchant(BaseModel):\n\n pass" }, { "identifier": "MerchantAddress", "path": "monzo_utils/model/merchant_address.py", "snippet": "class MerchantAddress(BaseModel):\n\n pass" }, { "identifier": "Pot", "path": "monzo_utils/model/pot.py", "snippet": "class Pot(BaseModel):\n pass" }, { "identifier": "Transaction", "path": "monzo_utils/model/transaction.py", "snippet": "class Transaction(BaseModel):\n\n DISPLAY_KEYS = ['date','type','money_in','money_out','pending','description']\n RELATIONSHIPS = {\n 'account': ['`transaction`.account_id', 'account.id'],\n 'transaction_metadata': ['`transaction`.id', 'transaction_metadata.transaction_id'],\n 'pot': ['`transaction`.pot_id', 'pot.id']\n }" }, { "identifier": "Counterparty", "path": "monzo_utils/model/counterparty.py", "snippet": "class Counterparty(BaseModel):\n\n pass" }, { "identifier": "TransactionMetadata", "path": "monzo_utils/model/transaction_metadata.py", "snippet": "class TransactionMetadata(BaseModel):\n\n pass" } ]
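The `DB` helper quoted earlier in this context list builds SQL through a chain of `find`/`select`/`where`/`andWhere`/`orderBy` calls that `prepare()` later assembles into one statement. A minimal usage sketch of that chaining style follows; the table and column names (`transaction`, `account_id`, `declined`, `created_at`) are illustrative only, and a configured `DB()` instance with a working driver is assumed.

db = DB()
row = (db.find('transaction')                 # sets query_table and resets the builder state
         .select('id')
         .select('description')               # multiple select() calls are comma-joined by prepare()
         .where('account_id = %s', [42])      # clauses and params are collected separately
         .andWhere('declined = %s', [0])
         .orderBy('created_at', 'desc')       # prepare() renders this as: order by `created_at` desc
         .getone())                           # prepare() + " limit 1", executed via one()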
import os import sys import time import json import yaml import re import datetime import pwd from pathlib import Path from monzo_utils.lib.config import Config from monzo_utils.lib.db import DB from monzo_utils.lib.log import Log from monzo_utils.lib.monzo_api import MonzoAPI from monzo_utils.model.provider import Provider from monzo_utils.model.account import Account from monzo_utils.model.merchant import Merchant from monzo_utils.model.merchant_address import MerchantAddress from monzo_utils.model.pot import Pot from monzo_utils.model.transaction import Transaction from monzo_utils.model.counterparty import Counterparty from monzo_utils.model.transaction_metadata import TransactionMetadata from monzo.exceptions import MonzoAuthenticationError, MonzoServerError, MonzoHTTPError, MonzoPermissionsError
8,183
continue if 'accounts' in Config().keys and account.account_id in Config().accounts: continue if 'Joint account between' in account.description: account_type = 'Joint Current Account' else: account_type = account.account_type() print(f" id: {account.account_id}") print(f" balance: £{account.balance.balance/100:.2f}") print(f"description: {account.description}") print(f" type: {account_type}") sys.stdout.write("\n") resp = self.prompt_continue('Sync this account? [y/N] ', True) if resp == 'n': continue account_name = self.prompt_input('name for this account') if 'accounts' not in Config().keys: Config().set('accounts', {}) Config().accounts[account.account_id] = { 'name': account_name } if account_type == 'Flex': Config().accounts[account.account_id]['credit_limit'] = self.prompt_input('credit limit', None, False, 'int') else: Config().accounts[account.account_id]['sortcode'] = self.prompt_input('sort code') Config().accounts[account.account_id]['account_no'] = self.prompt_input('account no') sys.stdout.write("\n") Config().save() def prompt_continue(self, prompt='Continue? [y/N] ', boolean=False): while 1: sys.stdout.write(prompt) sys.stdout.flush() resp = sys.stdin.readline().rstrip().lower() if resp == 'n': if boolean: return False print("\nStopping at user request.\n") sys.exit(0) if resp == 'y': break return True def prompt_input(self, prompt, default=None, none_allowed=False, validation=None): while 1: if default is None: sys.stdout.write(f"Enter {prompt}: ") else: sys.stdout.write(f"Enter {prompt} [{default}]: ") sys.stdout.flush() resp = sys.stdin.readline().rstrip() if len(resp) == 0: if default is None and none_allowed is False: continue resp = default if validation == 'int' and resp is not None: try: resp = int(resp) except: sys.stderr.write("\nerror: value must be an integer\n\n") sys.stderr.flush() continue return resp def test_db_access(self, db_config): try: db = DB(db_config) except Exception as e: Log().error(f"failed to initialise the database: {str(e)}") sys.exit(1) try: if db_config['driver'] == 'mysql': resp = db.query("show tables") else: resp = db.query("pragma table_info(`provider`)") except Exception as e: Log().error(f"Failed to connect to the database: {str(e)}") sys.exit(1) def get_or_create_merchant(self, mo_merchant): if 'metadata' in mo_merchant and 'website' in mo_merchant['metadata']: website = mo_merchant['metadata']['website'] else: website = None merchant_id = mo_merchant['id'] mo_merchant['merchant_id'] = mo_merchant['id'] mo_merchant.pop('id') mo_address = mo_merchant.pop('address')
#!/usr/bin/env python3 PROVIDER = 'Monzo' class MonzoSync: def __init__(self, no_init=False): homedir = pwd.getpwuid(os.getuid()).pw_dir self.monzo_dir = f"{homedir}/.monzo" if not os.path.exists(self.monzo_dir): os.mkdir(self.monzo_dir, 0o755) self.config_file = f"{self.monzo_dir}/config.yaml" self.token_file = f"{self.monzo_dir}/tokens" if no_init: return Config() self.api = MonzoAPI() self.db = DB() self.provider = self.get_provider() def setup(self): print("\n========================") print("Monzo Utils Setup Wizard") print("========================\n") print("Requirements:\n") print("1) You must have created an OAuth client here: https://developers.monzo.com/apps/new") print(" Note: confidentiality must be set to Confidential\n") print("2) The database (MySQL/MariaDB or SQLite3) must be created and ready (see README.md)\n") print("3) The machine we are running on must be reachable on a known port from the internet.") print(" The webserver must be configured with the CGI script to capture the oauth tokens.") print(" This is only required during setup for the initial oauth authentication flow.") print(" Once this is complete and the tokens are stored this can be removed.\n") self.prompt_continue() if os.path.exists(self.config_file): sys.stdout.write(f"\nWARNING! Config file already exists at: {self.config_file}\n\n") sys.stdout.write("If we continue this will be erased.\n\n") self.prompt_continue() sys.stdout.write("\n") sys.stdout.write("Which database do you want to use?\n\n") sys.stdout.write("1. MySQL/MariaDB (recommended)\n") sys.stdout.write("2. SQLite3\n\n") while 1: db_backend = self.prompt_input('DB choice') if db_backend in ['1','2']: break if db_backend == '1': mysql_host = self.prompt_input('MySQL host', '127.0.0.1') mysql_port = self.prompt_input('MySQL port', '3306', False, 'int') mysql_db = self.prompt_input('MySQL database', 'monzo') mysql_user = self.prompt_input('MySQL username', 'monzo') mysql_password = self.prompt_input('MySQL password', 'monzo') db = { 'driver': 'mysql', 'host': mysql_host, 'port': mysql_port, 'user': mysql_user, 'password': mysql_password, 'database': mysql_db } else: db = { 'driver': 'sqlite', 'path': f"{self.monzo_dir}/data.db" } self.test_db_access(db) sys.stdout.write("\n") client_id = self.prompt_input('Monzo Client ID') client_secret = self.prompt_input('Monzo Client Secret') redirect_url = self.prompt_input('Monzo Client redirect URL') sys.stdout.write("Enter the path where the CGI script will store the token file:\n") token_path = self.prompt_input('Token path', '/var/www/monzo/token') sys.stdout.write("\nIf the auth token expires or stops working the sync script can send\n") sys.stdout.write("an email to notify you. 
Enter this email below or leave blank if not required.\n") email = self.prompt_input('Email', None, True) Config({ 'oauth_token_file': token_path, 'db': db, 'client_id': client_id, 'client_secret': client_secret, 'redirect_url': redirect_url, 'email': email }) Config().save() self.__init__() self.scan_accounts() sys.stdout.write("Performing initial transaction sync ...\n\n") sys.stdout.flush() self.sync(days=89) sys.stdout.write("\nSetup complete!\n\n") def scan_accounts(self): sys.stdout.write("\nFinding accounts...\n\n") accounts = self.api.accounts() found_accounts = [] for account in accounts: if account.balance is None: continue if 'accounts' in Config().keys and account.account_id in Config().accounts: continue if 'Joint account between' in account.description: account_type = 'Joint Current Account' else: account_type = account.account_type() print(f" id: {account.account_id}") print(f" balance: £{account.balance.balance/100:.2f}") print(f"description: {account.description}") print(f" type: {account_type}") sys.stdout.write("\n") resp = self.prompt_continue('Sync this account? [y/N] ', True) if resp == 'n': continue account_name = self.prompt_input('name for this account') if 'accounts' not in Config().keys: Config().set('accounts', {}) Config().accounts[account.account_id] = { 'name': account_name } if account_type == 'Flex': Config().accounts[account.account_id]['credit_limit'] = self.prompt_input('credit limit', None, False, 'int') else: Config().accounts[account.account_id]['sortcode'] = self.prompt_input('sort code') Config().accounts[account.account_id]['account_no'] = self.prompt_input('account no') sys.stdout.write("\n") Config().save() def prompt_continue(self, prompt='Continue? [y/N] ', boolean=False): while 1: sys.stdout.write(prompt) sys.stdout.flush() resp = sys.stdin.readline().rstrip().lower() if resp == 'n': if boolean: return False print("\nStopping at user request.\n") sys.exit(0) if resp == 'y': break return True def prompt_input(self, prompt, default=None, none_allowed=False, validation=None): while 1: if default is None: sys.stdout.write(f"Enter {prompt}: ") else: sys.stdout.write(f"Enter {prompt} [{default}]: ") sys.stdout.flush() resp = sys.stdin.readline().rstrip() if len(resp) == 0: if default is None and none_allowed is False: continue resp = default if validation == 'int' and resp is not None: try: resp = int(resp) except: sys.stderr.write("\nerror: value must be an integer\n\n") sys.stderr.flush() continue return resp def test_db_access(self, db_config): try: db = DB(db_config) except Exception as e: Log().error(f"failed to initialise the database: {str(e)}") sys.exit(1) try: if db_config['driver'] == 'mysql': resp = db.query("show tables") else: resp = db.query("pragma table_info(`provider`)") except Exception as e: Log().error(f"Failed to connect to the database: {str(e)}") sys.exit(1) def get_or_create_merchant(self, mo_merchant): if 'metadata' in mo_merchant and 'website' in mo_merchant['metadata']: website = mo_merchant['metadata']['website'] else: website = None merchant_id = mo_merchant['id'] mo_merchant['merchant_id'] = mo_merchant['id'] mo_merchant.pop('id') mo_address = mo_merchant.pop('address')
merchant = Merchant().find_by_merchant_id(merchant_id)
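The completion target above (`Merchant().find_by_merchant_id(...)`) and the earlier `DB().find_transaction_by_account_id_and_declined_and_description(...)` call both follow a dynamic `find_by_<column>[_and_<column>...]` naming convention whose implementation is not quoted in this record. The sketch below shows one common way such finders can be resolved via `__getattr__`; it is a hypothetical illustration only, not the project's actual `BaseModel`/`DB` code, and it omits the extra keyword arguments (orderby, limit, search, where) that the real calls accept.

class DynamicFinder:
    """Hypothetical sketch: one way a find_by_<col>_and_<col> call could be resolved."""

    def __init__(self, db, table):
        self.db = db
        self.table = table

    def __getattr__(self, name):
        if not name.startswith('find_by_'):
            raise AttributeError(name)
        columns = name[len('find_by_'):].split('_and_')

        def finder(*values):
            # first condition via where(), the rest via andWhere(), matching the builder above
            query = self.db.find(self.table).where(f'`{columns[0]}` = %s', [values[0]])
            for col, val in zip(columns[1:], values[1:]):
                query = query.andWhere(f'`{col}` = %s', [val])
            return query.getone()

        return finder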
6
2023-11-05 12:48:18+00:00
12k
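The `MonzoAPI` snippet in this record repeats the same resilience pattern in `accounts()`, `transactions()` and `pots()`: up to three attempts, a short sleep after server errors or timeouts, and a one-time re-authentication on auth failures. The condensed sketch below isolates that retry loop as a generic helper; it is an illustration under those assumptions, not a drop-in replacement for the quoted methods (which, for example, recurse after re-authenticating).

import time
from monzo.exceptions import MonzoAuthenticationError, MonzoServerError, MonzoHTTPError

def call_with_retry(fetch, reauthenticate, attempts=3, delay=5):
    """Generic sketch of the 3-attempt retry / one-shot re-auth pattern used above."""
    reauthed = False
    for attempt in range(attempts):
        try:
            return fetch()
        except (MonzoHTTPError, MonzoAuthenticationError):
            if reauthed:
                raise                      # second auth failure: give up, as the quoted code does
            reauthenticate()
            reauthed = True
        except (MonzoServerError, TimeoutError):
            if attempt == attempts - 1:
                raise
            time.sleep(delay)              # back off before the next attempt
    raise RuntimeError("failed after %d attempts" % attempts)

Re-authentication is attempted at most once so that a persistently broken token cannot loop forever, mirroring the `first` flag in the quoted methods.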
WolfgangFahl/dcm
dcm/dcm_chart.py
[ { "identifier": "CompetenceElement", "path": "dcm/dcm_core.py", "snippet": "class CompetenceElement:\n \"\"\"\n A base class representing a generic competence element with common properties.\n\n Attributes:\n name (str): The name of the competence element.\n id (Optional[str]): An optional identifier for the competence element will be set to the name if id is None.\n url (Optional[str]): An optional URL for more information about the competence element.\n description (Optional[str]): An optional description of the competence element.\n color_code (str): A string representing a color code associated with the competence element.\n \"\"\"\n\n name: str\n id: Optional[str] = None\n url: Optional[str] = None\n description: Optional[str] = None\n color_code: Optional[str] = None\n\n def __post_init__(self):\n # Set the id to the the slug of the name if id is None\n if self.id is None:\n self.id = slugify(self.name)\n\n def as_html(self) -> str:\n \"\"\"\n convert me to html\n\n Returns:\n str: html markup\n \"\"\"\n html = f\"<h2>{self.name}</h2>\"\n if self.description:\n desc_html = markdown2.markdown(\n self.description, extras=[\"fenced-code-blocks\", \"tables\", \"spoiler\"]\n )\n html = html + \"\\n\" + desc_html\n return html\n\n def to_svg_node_config(self, url: str = None, **kwargs) -> SVGNodeConfig:\n \"\"\"\n convert me to an SVGNode Configuration\n\n Args:\n url(str): the url to use for clicking this svg node - if None use\n my configured url\n \"\"\"\n if url is None:\n url = self.url\n element_type = f\"{self.__class__.__name__}\"\n comment = f\"{element_type}:{self.description}\"\n svg_node_config = SVGNodeConfig(\n element_type=f\"{element_type}\",\n id=f\"{self.id}\",\n url=url,\n fill=self.color_code,\n title=self.name,\n comment=comment,\n **kwargs,\n )\n return svg_node_config" }, { "identifier": "CompetenceFacet", "path": "dcm/dcm_core.py", "snippet": "class CompetenceFacet(CompetenceElement):\n \"\"\"\n Represents a specific facet of a competence aspect, inheriting from CompetenceElement.\n\n This class can include additional properties or methods specific to a competence facet.\n \"\"\"" }, { "identifier": "CompetenceTree", "path": "dcm/dcm_core.py", "snippet": "class CompetenceTree(CompetenceElement, YamlAble[\"CompetenceTree\"]):\n \"\"\"\n Represents the entire structure of competencies, including various aspects and levels.\n\n Attributes:\n competence_aspects (List[CompetenceAspect]): A list of CompetenceAspect objects.\n competence_levels (List[CompetenceLevel]): A list of CompetenceLevel objects representing the different levels in the competence hierarchy.\n element_names (Dict[str, str]): A dictionary holding the names for tree, aspects, facets, and levels. 
The key is the type (\"tree\", \"aspect\", \"facet\", \"level\").\n \"\"\"\n\n lookup_url: Optional[str] = None\n aspects: List[CompetenceAspect] = field(default_factory=list)\n levels: List[CompetenceLevel] = field(default_factory=list)\n element_names: Dict[str, str] = field(default_factory=dict)\n\n def __post_init__(self):\n \"\"\"\n initalize the path variables of my hierarchy\n \"\"\"\n super().__post_init__()\n self.path = self.id\n # Loop through each competence aspect and set their paths and parent references\n for aspect in self.aspects:\n aspect.competence_tree = self\n aspect.path = f\"{self.id}/{aspect.id}\"\n for area in aspect.areas:\n area.competence_tree = self\n area.aspect = aspect\n area.path = f\"{self.id}/{aspect.id}/{area.id}\"\n for facet in area.facets:\n facet.competence_tree = self\n facet.area = area\n facet.path = f\"{self.id}/{aspect.id}/{area.id}/{facet.id}\"\n\n @classmethod\n def required_keys(cls) -> Tuple:\n keys = {\"name\", \"id\", \"url\", \"description\", \"element_names\"}\n return keys\n\n def lookup_by_path(\n self, path: str, lenient: bool = True\n ) -> Optional[CompetenceElement]:\n \"\"\"\n Look up and return a competence element (tree,aspect of facet)\n based on the given path.\n\n The path is expected to be in the format \"tree_id/aspect_id/facet_id\".\n This method parses the path and retrieves the corresponding competence aspect or facet.\n\n Args:\n path (str): The path in the format \"tree_id/aspect_id/facet_id\".\n\n lenient(bool): if not lenient raise Exceptions for invalid paths and ids\n Returns:\n Optional[CompetenceElement]: The competence aspect or facet corresponding to the given path.\n \"\"\"\n\n def handle_error(msg):\n if not lenient:\n raise ValueError(msg)\n\n parts = path.split(\"/\")\n if len(parts) < 1:\n return None\n\n tree_id = parts[0]\n if tree_id != self.id:\n handle_error(f\"invalid tree_id for lookup {tree_id}\")\n return None\n if len(parts) == 1:\n return self\n if len(parts) > 1:\n aspect_id = parts[1]\n # Retrieve the aspect\n aspect = next((aspect for aspect in self.aspects if aspect.id==aspect_id), None)\n if aspect:\n if len(parts) == 2:\n return aspect\n if len(parts) > 2:\n area_id = parts[2]\n area = next((area for area in aspect.areas if area.id == area_id), None)\n if area:\n if len(parts) == 3:\n return area\n if len(parts) > 3:\n facet_id = parts[3]\n facet = next(\n (facet for facet in area.facets if facet.id == facet_id), None\n )\n if facet:\n return facet\n handle_error(f\"invalid path for lookup {path}\")\n return None\n\n def to_pretty_json(self):\n \"\"\"\n Converts the CompetenceTree object to a pretty JSON string, handling null values.\n \"\"\"\n json_str = self.to_json()\n json_dict = json.loads(json_str)\n\n def remove_none_values(data):\n \"\"\"\n Recursively removes keys with None values from a dictionary, list, or nested structure.\n \"\"\"\n if isinstance(data, dict):\n return {\n k: remove_none_values(v) for k, v in data.items() if v is not None\n }\n elif isinstance(data, list):\n return [remove_none_values(item) for item in data]\n return data\n\n none_free_dict = remove_none_values(json_dict)\n null_free_json_str = json.dumps(none_free_dict, indent=2)\n return null_free_json_str\n\n def add_legend(self, svg: SVG) -> None:\n \"\"\"\n Add a legend to the SVG explaining the color codes for levels and aspects.\n Args:\n svg (SVG): The SVG object to which the legend will be added.\n \"\"\"\n # Starting x position for the legends, starting 10 pixels from the left edge\n x_start = 10\n # y 
position for the legends, starting 20 pixels from the bottom edge\n y = svg.config.total_height - svg.config.legend_height + 20\n # Width and height of each legend color box\n box_width, box_height = 30, 20\n # Padding between legend items and between the color box and the text\n padding = 5\n\n # Add the competence level legend\n level_items = [(level.color_code, level.name) for level in self.levels]\n svg.add_legend_column(\n level_items,\n self.element_names.get(\"level\", \"Level\"),\n x_start,\n y,\n box_width,\n box_height,\n )\n\n # Calculate the x position for the aspect legend based on the width of the level legend\n x_aspect_start = (\n x_start\n + box_width\n + padding\n + max(svg.get_text_width(level.name) for level in self.levels)\n + padding\n )\n\n # Add the competence aspect legend\n aspect_items = [(aspect.color_code, aspect.name) for aspect in self.aspects]\n svg.add_legend_column(\n aspect_items,\n self.element_names.get(\"aspect\", \"Aspect\"),\n x_aspect_start,\n y,\n box_width,\n box_height,\n )" }, { "identifier": "DynamicCompetenceMap", "path": "dcm/dcm_core.py", "snippet": "class DynamicCompetenceMap:\n \"\"\"\n a visualization of a competence map\n \"\"\"\n\n def __init__(self, competence_tree: CompetenceTree):\n \"\"\"\n constructor\n \"\"\"\n self.competence_tree = competence_tree\n self.svg = None\n\n @property\n def main_id(self):\n main_id = self.competence_tree.id\n return main_id\n\n @classmethod\n def examples_path(cls) -> str:\n # the root directory (default: examples)\n path = os.path.join(os.path.dirname(__file__), \"../dcm_examples\")\n path = os.path.abspath(path)\n return path\n\n @classmethod\n def get_example_dcm_definitions(\n cls,\n markup: str = \"json\",\n required_keys: Optional[Tuple] = None,\n as_text: bool = True,\n ) -> dict:\n \"\"\"\n Retrieve example Dynamic Competence Map (DCM) definitions from files in the specified markup format (either JSON or YAML).\n\n Args:\n markup (str): The markup format of the input files. Defaults to 'json'. Supported values are 'json' and 'yaml'.\n required_keys (Optional[Tuple]): A tuple of keys required to validate the data. If not provided, all keys will be considered valid.\n as_text (bool): If True, returns the file content as text; if False, returns parsed data. 
Defaults to True.\n\n Returns:\n dict: A dictionary where each key is the prefix of the file name and the value is the file content as text or parsed data, depending on the value of 'as_text'.\n\n Raises:\n Exception: If there's an error in reading or parsing the file, or if the file does not meet the required validation criteria.\n \"\"\"\n example_dcm_defs = {}\n file_ext = f\".{markup}\"\n examples_path = cls.examples_path()\n for dirpath, _dirnames, filenames in os.walk(examples_path):\n for filename in filenames:\n if filename.endswith(file_ext):\n filepath = os.path.join(dirpath, filename)\n with open(filepath, \"r\") as definition_file:\n file_prefix = filename.replace(file_ext, \"\")\n definition_text = definition_file.read()\n try:\n definition_data = cls.parse_markup(definition_text, markup)\n if cls.is_valid_definition(definition_data, required_keys):\n if as_text:\n example_dcm_defs[file_prefix] = definition_text\n else:\n example_dcm_defs[file_prefix] = definition_data\n except Exception as ex:\n cls.handle_markup_issue(\n filename, definition_text, ex, markup\n )\n return example_dcm_defs\n\n @classmethod\n def parse_markup(cls, text: str, markup: str) -> Union[dict, list]:\n \"\"\"\n Parse the given text as JSON or YAML based on the specified markup type.\n\n Args:\n text (str): The string content to be parsed.\n markup (str): The type of markup to use for parsing. Supported values are 'json' and 'yaml'.\n\n Returns:\n Union[dict, list]: The parsed data, which can be either a dictionary or a list, depending on the content.\n\n Raises:\n ValueError: If an unsupported markup format is specified.\n \"\"\"\n if markup == \"json\":\n data=json.loads(text)\n return data\n elif markup == \"yaml\":\n data=yaml.safe_load(text)\n return data\n else:\n raise ValueError(f\"Unsupported markup format: {markup}\")\n\n @classmethod\n def handle_markup_issue(cls, name: str, definition_string: str, ex, markup: str):\n if isinstance(ex, JSONDecodeError):\n lines = definition_string.splitlines() # Split the string into lines\n err_line = lines[ex.lineno - 1] # JSONDecodeError gives 1-based lineno\n pointer = (\n \" \" * (ex.colno - 1) + \"^\"\n ) # Create a pointer string to indicate the error position\n error_message = (\n f\"{name}:JSON parsing error on line {ex.lineno} column {ex.colno}:\\n\"\n f\"{err_line}\\n\"\n f\"{pointer}\\n\"\n f\"{ex.msg}\"\n )\n raise ValueError(error_message) # Raise a new exception with this message\n else:\n error_message = f\"error in {name}: {str(ex)}\"\n raise ValueError(error_message)\n\n @classmethod\n def is_valid_definition(cls, definition_data, required_keys: Tuple):\n return all(key in definition_data for key in required_keys)\n\n @classmethod\n def get_examples(cls, content_class=CompetenceTree, markup: str = \"json\") -> dict:\n examples = {}\n for name, definition_string in cls.get_example_dcm_definitions(\n required_keys=content_class.required_keys(), markup=markup\n ).items():\n example = cls.from_definition_string(\n name, definition_string, content_class, markup=markup\n )\n # check the type of the example\n example_id = example.main_id\n examples[example_id] = example\n return examples\n\n @classmethod\n def from_definition_string(\n cls, name: str, definition_string: str, content_class, markup: str = \"json\"\n ) -> Any:\n \"\"\"\n Load a DynamicCompetenceMap or Learner instance from a definition string (either JSON or YAML).\n\n Args:\n name (str): A name identifier for the data source.\n definition_string (str): The string content of the 
definition.\n content_class (dataclass_json): The class which will be instantiated with the parsed data.\n markup (str): The markup format of the data. Defaults to 'json'. Supported values are 'json' and 'yaml'.\n\n Returns:\n DynamicCompetenceMap: An instance of DynamicCompetenceMap loaded with the parsed data.\n\n Raises:\n ValueError: If there's an error in parsing the data.\n \"\"\"\n try:\n data = cls.parse_markup(definition_string, markup)\n content = content_class.from_dict(data)\n if isinstance(content, CompetenceTree):\n return DynamicCompetenceMap(content)\n else:\n return content\n except Exception as ex:\n cls.handle_markup_issue(name, definition_string, ex, markup)" }, { "identifier": "Learner", "path": "dcm/dcm_core.py", "snippet": "class Learner:\n \"\"\"\n A learner with achievements.\n Attributes:\n learner_id (str): Identifier for the learner.\n achievements (Dict[str, List[Achievement]]):\n A dictionary where each key is a competence element identifier\n and the value is a list of Achievement instances for that tree.\n \"\"\"\n\n learner_id: str\n achievements: Optional[List[Achievement]] = field(default=None)\n\n def __post_init__(self):\n self.achievements_by_path = {}\n if self.achievements:\n for achievement in self.achievements:\n self.achievements_by_path[achievement.path] = achievement\n\n @classmethod\n def required_keys(cls):\n keys = {\"achievements\"}\n return keys\n\n @property\n def main_id(self):\n main_id = self.learner_id\n return main_id\n\n def add_achievement(self, new_achievement):\n self.achievements.append(new_achievement)\n self.achievements_by_path[new_achievement.path] = new_achievement\n\n def get_competence_tree_ids(self) -> List[str]:\n \"\"\"\n Get all unique competence tree IDs of my achievements.\n\n Returns:\n List[str]: A list of unique competence tree IDs.\n \"\"\"\n # Assuming that the learner's achievements are stored in a list called self.achievements\n # You can modify this part according to your actual data structure.\n\n # Create a set to store unique competence tree IDs\n unique_tree_ids = set()\n\n # Iterate through the learner's achievements\n for achievement in self.achievements:\n # Assuming each achievement has a tree_id attribute\n tree_id = achievement.tree_id\n\n # Add the tree_id to the set\n unique_tree_ids.add(tree_id)\n\n # Convert the set to a list and return\n return list(unique_tree_ids)" }, { "identifier": "SVG", "path": "dcm/svg.py", "snippet": "class SVG:\n \"\"\"\n Class for creating SVG drawings.\n\n Attributes:\n config (SVGConfig): Configuration for the SVG drawing.\n \"\"\"\n\n def __init__(self, config: SVGConfig = None):\n \"\"\"\n Initialize SVG object with given configuration.\n\n Args:\n config (SVGConfig): Configuration for SVG generation.\n \"\"\"\n self.config = config if config else SVGConfig()\n self.width = self.config.width\n self.height = self.config.height\n self.elements = []\n self.indent = self.config.indent\n\n def get_svg_style(self) -> str:\n \"\"\"\n Define styles for SVG elements.\n\n Returns:\n str: String containing style definitions for SVG.\n \"\"\"\n return (\n f\"{self.indent}<style>\\n\"\n f\"{self.indent * 2}.hoverable {{ cursor: pointer; fill-opacity: 1; stroke: black; stroke-width: 0.5; }}\\n\"\n f\"{self.indent * 2}.hoverable:hover {{ fill-opacity: 0.7; }}\\n\"\n f\"{self.indent * 2}.selected {{ fill-opacity: 0.5; stroke: blue; stroke-width: 1.5;}}\\n\"\n f\"{self.indent * 2}.popup {{\\n\"\n f\"{self.indent * 3}border: 2px solid black;\\n\"\n f\"{self.indent * 
3}border-radius: 15px;\\n\"\n f\"{self.indent * 3}overflow: auto;\\n\" # changed to 'auto' to allow scrolling only if needed\n f\"{self.indent * 3}background: white;\\n\"\n f\"{self.indent * 3}box-sizing: border-box;\\n\" # ensures padding and border are included\n f\"{self.indent * 3}padding: 10px;\\n\" # optional padding inside the popup\n f\"{self.indent * 3}height: 100%;\\n\" # adjusts height relative to foreignObject\n f\"{self.indent * 3}width: 100%;\\n\" # adjusts width relative to foreignObject\n f\"{self.indent * 2}}}\\n\"\n f\"{self.indent * 2}.close-btn {{\\n\" # style for the close button\n f\"{self.indent * 3}cursor: pointer;\\n\"\n f\"{self.indent * 3}position: absolute;\\n\"\n f\"{self.indent * 3}top: 0;\\n\"\n f\"{self.indent * 3}right: 0;\\n\"\n f\"{self.indent * 3}padding: 5px;\\n\"\n f\"{self.indent * 3}font-size: 20px;\\n\"\n f\"{self.indent * 3}user-select: none;\\n\" # prevents text selection on click\n f\"{self.indent * 2}}}\\n\"\n f\"{self.indent}</style>\\n\"\n )\n\n def get_text_width(self, text: str) -> int:\n \"\"\"\n Estimate the width of a text string in the SVG based on the font size and font name.\n\n Args:\n text (str): The text content.\n\n Returns:\n int: The estimated width of the text in pixels.\n \"\"\"\n average_char_width_factor = 0.6\n average_char_width = average_char_width_factor * self.config.font_size\n return int(average_char_width * len(text))\n\n def add_element(self, element: str, level: int = 1, comment: str = None):\n \"\"\"\n Add an SVG element to the elements list with proper indentation.\n\n Args:\n element (str): SVG element to be added.\n level (int): Indentation level for the element.\n comment(str): optional comment to add\n \"\"\"\n base_indent = f\"{self.indent * level}\"\n if comment:\n indented_comment = f\"{base_indent}<!-- {comment} -->\\n\"\n self.elements.append(indented_comment)\n indented_element = f\"{base_indent}{element}\\n\"\n self.elements.append(indented_element)\n\n def add_circle(self, config: SVGNodeConfig):\n \"\"\"\n Add a circle element to the SVG, optionally making it clickable and with a hover effect.\n\n Args:\n config (SVGNodeConfig): Configuration for the circle element.\n \"\"\"\n color = config.fill if config.fill else self.config.default_color\n circle_element = f'<circle cx=\"{config.x}\" cy=\"{config.y}\" r=\"{config.width}\" fill=\"{color}\" class=\"{config.element_class}\" />'\n\n # If URL is provided, wrap the circle in an anchor tag to make it clickable\n if config.url:\n circle_indent = self.indent * (config.indent_level + 1)\n circle_element = f\"\"\"<a xlink:href=\"{config.url}\" target=\"_blank\">\n{circle_indent}{circle_element}\n</a>\"\"\"\n\n # Use add_group to add the circle element with proper indentation\n self.add_group(\n circle_element,\n group_id=config.id,\n group_class=config.element_class,\n level=config.indent_level,\n comment=config.comment,\n )\n\n def add_rectangle(\n self,\n x: int,\n y: int,\n width: int,\n height: int,\n fill: str = None,\n indent_level: int = 1,\n ):\n \"\"\"\n Add a rectangle element to the SVG.\n\n Args:\n x (int): X-coordinate of the rectangle's top-left corner.\n y (int): Y-coordinate of the rectangle's top-left corner.\n width (int): Width of the rectangle.\n height (int): Height of the rectangle.\n fill (str, optional): Fill color of the rectangle. 
Defaults to the default color.\n indent_level (int): Indentation level for the rectangle.\n \"\"\"\n color = fill if fill else self.config.default_color\n rect = f'{self.indent * 3}<rect x=\"{x}\" y=\"{y}\" width=\"{width}\" height=\"{height}\" fill=\"{color}\" />\\n'\n self.add_element(rect)\n\n def add_legend_column(\n self,\n items: List[Tuple[str, str]],\n title: str,\n x: int,\n y: int,\n width: int,\n height: int,\n ) -> None:\n \"\"\"\n Add a legend column to the SVG.\n\n Args:\n items (List[Tuple[str, str]]): List of tuples with color code and label.\n title (str): Title of the legend.\n x (int): X position of the legend.\n y (int): Y position of the legend.\n width (int): Width of the color box in the legend.\n height (int): Height of each legend item.\n \"\"\"\n self.add_text(x, y - height, title, font_weight=\"bold\")\n for index, (color, label) in enumerate(items):\n self.add_rectangle(x, y + index * (height + 5), width, height, color)\n self.add_text(x + width + 10, y + index * (height + 5) + height / 2, label)\n\n def add_text(\n self,\n x: int,\n y: int,\n text: str,\n fill: str = \"black\",\n font_weight: str = \"normal\",\n text_anchor: str = \"start\",\n ) -> None:\n \"\"\"\n Add text to the SVG.\n\n Args:\n x (int): X position of the text.\n y (int): Y position of the text.\n text (str): Text content.\n fill (str, optional): Fill color of the text. Defaults to \"black\".\n font_weight (str, optional): Font weight (normal, bold, etc.). Defaults to \"normal\".\n text_anchor (str, optional): Text alignment (start, middle, end). Defaults to \"start\".\n \"\"\"\n escaped_text = html.escape(text)\n text_element = (\n f'<text x=\"{x}\" y=\"{y}\" fill=\"{fill}\" '\n f'font-family=\"{self.config.font}\" '\n f'font-size=\"{self.config.font_size}\" '\n f'font-weight=\"{font_weight}\" '\n f'text-anchor=\"{text_anchor}\">'\n f\"{escaped_text}</text>\\n\"\n )\n self.add_element(text_element)\n\n def add_group(\n self,\n content: str,\n group_id: str = None,\n group_class: str = None,\n level: int = 1,\n comment: str = None,\n ):\n \"\"\"\n Add a group of elements to the SVG.\n\n Args:\n content (str): SVG content to be grouped.\n group_id (str, optional): ID for the group.\n group_class (str, optional): Class for the group.\n level (int): Indentation level for the group.\n \"\"\"\n group_attrs = []\n if group_id:\n group_attrs.append(f'id=\"{group_id}\"')\n if group_class:\n group_attrs.append(f'class=\"{group_class}\"')\n attrs_str = \" \".join(group_attrs)\n indented_content = \"\\n\".join(\n f\"{self.indent * (level + 1)}{line}\" for line in content.strip().split(\"\\n\")\n )\n group_str = f\"{self.indent * level}<g {attrs_str}>\\n{indented_content}\\n{self.indent * level}</g>\\n\"\n self.add_element(group_str, level=level, comment=comment)\n\n def add_pie_segment(\n self,\n cx: int,\n cy: int,\n radius: int,\n start_angle_deg: float,\n end_angle_deg: float,\n color: str,\n segment_name: str,\n segment_id: str = None,\n segment_class: str = None,\n segment_url: str = None,\n ) -> None:\n \"\"\"\n Add a pie segment to the SVG.\n\n Args:\n cx (int): X-coordinate of the center of the pie.\n cy (int): Y-coordinate of the center of the pie.\n radius (int): Radius of the pie.\n start_angle_deg (float): Start angle of the segment in degrees.\n end_angle_deg (float): End angle of the segment in degrees.\n color (str): Fill color of the segment.\n segment_name (str): Name of the segment, used for the tooltip.\n segment_id (str, optional): ID for the segment group. 
Defaults to None.\n segment_class (str, optional): Class for the segment group. Defaults to None.\n segment_url (str, optional): URL linked to the segment. Defaults to None.\n\n Returns:\n None\n \"\"\"\n if color is None:\n color = self.config.default_color\n # Convert angles from degrees to radians for calculations\n start_angle_rad = radians(start_angle_deg)\n end_angle_rad = radians(end_angle_deg)\n\n # Calculate the start and end points\n start_x = cx + radius * cos(start_angle_rad)\n start_y = cy + radius * sin(start_angle_rad)\n end_x = cx + radius * cos(end_angle_rad)\n end_y = cy + radius * sin(end_angle_rad)\n\n # Determine if the arc should be drawn as a large-arc (values >= 180 degrees)\n large_arc_flag = \"1\" if end_angle_deg - start_angle_deg >= 180 else \"0\"\n\n # Create the path for the pie segment without indentation\n path_str = (\n f\"M {cx} {cy} \"\n f\"L {start_x} {start_y} \"\n f\"A {radius} {radius} 0 {large_arc_flag} 1 {end_x} {end_y} \"\n \"Z\"\n )\n\n # Assemble the path and title elements\n path_element = f'<path d=\"{path_str}\" fill=\"{color}\" />\\n'\n escaped_title = html.escape(segment_name) # Escape special characters\n\n title_element = f\"<title>{escaped_title}</title>\"\n\n # Combine path and title into one string without adding indentation here\n group_content = f\"{path_element}{title_element}\"\n\n # If an URL is provided, wrap the content within an anchor\n if segment_url:\n group_content = (\n f'<a xlink:href=\"{segment_url}\" target=\"_blank\">\\n{group_content}</a>\\n'\n )\n\n # Use add_group to add the pie segment with proper indentation\n self.add_group(\n group_content, group_id=segment_id, group_class=segment_class, level=2\n )\n\n def add_donut_segment(\n self,\n config: SVGNodeConfig,\n segment: DonutSegment,\n ) -> None:\n \"\"\"\n Add a donut segment to the SVG.\n\n Args:\n config (SVGNodeConfig): Configuration for the donut segment.\n start_angle_deg (float): Start angle of the segment in degrees.\n end_angle_deg (float): End angle of the segment in degrees.\n \"\"\"\n cx, cy = config.x, config.y\n color = config.fill if config.fill else self.config.default_color\n\n if color is None:\n color = self.config.default_color\n # Convert angles from degrees to radians for calculations\n start_angle_rad = radians(segment.start_angle)\n end_angle_rad = radians(segment.end_angle)\n\n # Calculate the start and end points for the outer radius\n start_x_outer = cx + segment.outer_radius * cos(start_angle_rad)\n start_y_outer = cy + segment.outer_radius * sin(start_angle_rad)\n end_x_outer = cx + segment.outer_radius * cos(end_angle_rad)\n end_y_outer = cy + segment.outer_radius * sin(end_angle_rad)\n\n # Calculate the start and end points for the inner radius\n start_x_inner = cx + segment.inner_radius * cos(start_angle_rad)\n start_y_inner = cy + segment.inner_radius * sin(start_angle_rad)\n end_x_inner = cx + segment.inner_radius * cos(end_angle_rad)\n end_y_inner = cy + segment.inner_radius * sin(end_angle_rad)\n\n # Determine if the arc should be drawn as a large-arc (values >= 180 degrees)\n large_arc_flag = \"1\" if segment.end_angle - segment.start_angle >= 180 else \"0\"\n\n # Create the path for the pie segment without indentation\n path_str = (\n f\"M {start_x_inner} {start_y_inner} \" # Move to start of inner arc\n f\"L {start_x_outer} {start_y_outer} \" # Line to start of outer arc\n f\"A {segment.outer_radius} {segment.outer_radius} 0 {large_arc_flag} 1 {end_x_outer} {end_y_outer} \" # Outer arc\n f\"L {end_x_inner} {end_y_inner} \" # 
Line to end of inner arc\n f\"A {segment.inner_radius} {segment.inner_radius} 0 {large_arc_flag} 0 {start_x_inner} {start_y_inner} \" # Inner arc (reverse)\n \"Z\"\n )\n\n # Assemble the path and title elements\n path_element = f'<path d=\"{path_str}\" fill=\"{color}\" />\\n'\n escaped_title = html.escape(config.title) # Escape special characters\n\n title_element = f\"<title>{escaped_title}</title>\"\n\n # Combine path and title into one string without adding indentation here\n group_content = f\"{path_element}{title_element}\"\n\n # Check if the segment should be shown as a popup\n if config.show_as_popup:\n # Add JavaScript to handle popup logic\n onclick_action = f\"onclick=\\\"showPopup('{config.url}', evt,this)\\\"\"\n group_content = f\"<g {onclick_action}>{group_content}</g>\"\n elif config.url:\n # Regular link behavior\n group_content = (\n f'<a xlink:href=\"{config.url}\" target=\"_blank\">{group_content}</a>'\n )\n\n # Use add_group to add the pie segment with proper indentation\n self.add_group(\n group_content,\n group_id=config.id,\n group_class=config.element_class,\n level=2,\n comment=config.comment,\n )\n\n def get_java_script(self) -> str:\n \"\"\"\n get the java script code for interactive behavior\n \"\"\"\n popup_script = \"\"\"\n <script>\n function showPopup(url, evt,element) {\n // show a Popup fetching html content from the given url\n // for the given element\n // Handle the selection of the popup element\n selectPopupElement(element);\n var popup = document.getElementById('dcm-svg-popup');\n var iframe = document.getElementById('popup-iframe');\n var svgRect = evt.target.getBoundingClientRect();\n var svg = document.querySelector('svg');\n var svgPoint = svg.createSVGPoint();\n svgPoint.x = evt.clientX - svgRect.left;\n svgPoint.y = evt.clientY - svgRect.top;\n \n // Position the popup near the click event\n popup.setAttribute('x', svgPoint.x);\n popup.setAttribute('y', svgPoint.y);\n // Set the iframe src and make the popup visible\n iframe.setAttribute('src', url);\n popup.setAttribute('visibility', 'visible');\n }\n \n function selectPopupElement(element) {\n var popup = document.getElementById('dcm-svg-popup');\n \n // Deselect the current element if there is one\n if (popup.currentElement) {\n popup.currentElement.classList.remove('selected');\n }\n \n // Select the new element\n if (element) {\n element.classList.add('selected');\n popup.currentElement = element; // Update the reference to the currently selected element\n } else {\n popup.currentElement = null; // Clear the reference if no element is passed\n }\n }\n \n function closePopup() {\n var popup = document.getElementById('dcm-svg-popup');\n popup.setAttribute('visibility', 'hidden');\n // Deselect the element when the popup is closed\n selectPopupElement(null);\n }\n </script>\n \"\"\"\n return popup_script\n\n def get_svg_markup(self, with_java_script: bool = True) -> str:\n \"\"\"\n Generate the complete SVG markup.\n\n Args:\n with_java_script(bool): if True(default) the javascript code is included otherwise\n it's available via the get_java_script function\n\n Returns:\n str: String containing the complete SVG markup.\n \"\"\"\n # Get current date and time\n now = datetime.now()\n formatted_now = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n header = (\n f\"<!-- generated by dcm https://github.com/WolfgangFahl/dcm at {formatted_now} -->\\n\"\n f'<svg xmlns=\"http://www.w3.org/2000/svg\" '\n f'xmlns:xlink=\"http://www.w3.org/1999/xlink\" '\n f'width=\"{self.width}\" 
height=\"{self.config.total_height}\">\\n'\n )\n popup = \"\"\"\n <!-- Add a foreignObject for the popup -->\n<foreignObject id=\"dcm-svg-popup\" class=\"popup\" width=\"500\" height=\"354\" x=\"150\" y=\"260\" visibility=\"hidden\">\n <body xmlns=\"http://www.w3.org/1999/xhtml\">\n <!-- Content of your popup goes here -->\n <div class=\"popup\" style=\"background-color: white; border: 1px solid black; padding: 10px; box-sizing: border-box; width: 500px; height: 354px; position: relative;\">\n <span onclick=\"closePopup()\" class=\"close-btn\">ⓧ</span>\n <iframe id=\"popup-iframe\" width=\"100%\" height=\"100%\" frameborder=\"0\"></iframe>\n </div>\n </body>\n</foreignObject>\n\"\"\"\n\n styles = self.get_svg_style()\n body = \"\".join(self.elements)\n footer = \"</svg>\"\n java_script = self.get_java_script() if with_java_script else \"\"\n svg_markup = f\"{header}{java_script}{styles}{body}{popup}{footer}\"\n return svg_markup\n\n def save(self, filename: str):\n \"\"\"\n Save the SVG markup to a file.\n\n Args:\n filename (str): Filename to save the SVG markup.\n \"\"\"\n with open(filename, \"w\") as file:\n file.write(self.get_svg_markup())" }, { "identifier": "DonutSegment", "path": "dcm/svg.py", "snippet": "class DonutSegment:\n \"\"\"\n a donut segment\n \"\"\"\n\n inner_radius: float\n outer_radius: float\n start_angle: Optional[float] = 0.0\n end_angle: Optional[float] = 360.0" }, { "identifier": "SVGConfig", "path": "dcm/svg.py", "snippet": "class SVGConfig:\n \"\"\"\n Configuration class for SVG generation.\n\n Attributes:\n width (int): Width of the SVG canvas in pixels.\n height (int): Height of the SVG canvas in pixels.\n legend_height (int): Height reserved for the legend in pixels.\n font (str): Font family for text elements.\n font_size (int): Font size in points for text elements.\n indent (str): Indentation string, default is two spaces.\n default_color (str): Default color code for SVG elements.\n \"\"\"\n\n width: int = 600\n height: int = 600\n legend_height: int = 150\n font: str = \"Arial\"\n font_size: int = 12\n indent: str = \" \"\n default_color: str = \"#C0C0C0\"\n\n @property\n def total_height(self) -> int:\n \"\"\"\n Calculate total height of the SVG canvas including the legend.\n\n Returns:\n int: Total height of the SVG canvas.\n \"\"\"\n return self.height + self.legend_height" } ]
from dataclasses import dataclass from typing import List, Optional from dcm.dcm_core import ( CompetenceElement, CompetenceFacet, CompetenceTree, DynamicCompetenceMap, Learner, ) from dcm.svg import SVG, DonutSegment, SVGConfig
9,610
""" Created on 2024-01-12 @author: wf """ class DcmChart: """ a Dynamic competence map chart """ def __init__(self, dcm: DynamicCompetenceMap): """ Constructor """ self.dcm = dcm def generate_svg( self, filename: Optional[str] = None, learner: Optional[Learner] = None, config: Optional[SVGConfig] = None, ) -> str: """ Generate the SVG markup and optionally save it to a file. If a filename is given, the method will also save the SVG to that file. The SVG is generated based on internal state not shown here. Args: filename (str, optional): The path to the file where the SVG should be saved. Defaults to None. learner(Learner): the learner to show the achievements for config (SVGConfig, optional): The configuration for the SVG canvas and legend. Defaults to default values. Returns: str: The SVG markup. """ if config is None: config = SVGConfig() # Use default configuration if none provided svg_markup = self.generate_svg_markup( self.dcm.competence_tree, learner=learner, config=config ) if filename: self.save_svg_to_file(svg_markup, filename) return svg_markup def generate_donut_segment_for_element( self, svg: SVG, element: CompetenceElement, learner: Learner,
""" Created on 2024-01-12 @author: wf """ class DcmChart: """ a Dynamic competence map chart """ def __init__(self, dcm: DynamicCompetenceMap): """ Constructor """ self.dcm = dcm def generate_svg( self, filename: Optional[str] = None, learner: Optional[Learner] = None, config: Optional[SVGConfig] = None, ) -> str: """ Generate the SVG markup and optionally save it to a file. If a filename is given, the method will also save the SVG to that file. The SVG is generated based on internal state not shown here. Args: filename (str, optional): The path to the file where the SVG should be saved. Defaults to None. learner(Learner): the learner to show the achievements for config (SVGConfig, optional): The configuration for the SVG canvas and legend. Defaults to default values. Returns: str: The SVG markup. """ if config is None: config = SVGConfig() # Use default configuration if none provided svg_markup = self.generate_svg_markup( self.dcm.competence_tree, learner=learner, config=config ) if filename: self.save_svg_to_file(svg_markup, filename) return svg_markup def generate_donut_segment_for_element( self, svg: SVG, element: CompetenceElement, learner: Learner,
segment: DonutSegment,
6
2023-11-06 09:24:24+00:00
12k
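Putting the pieces of this record together, the chart class in `dcm/dcm_chart.py` is driven by a `DynamicCompetenceMap` built from one of the bundled example definitions. A minimal end-to-end sketch follows; it assumes the package is importable, that at least one example definition ships under the `dcm_examples` directory, and the output path is arbitrary.

from dcm.dcm_chart import DcmChart
from dcm.dcm_core import CompetenceTree, DynamicCompetenceMap
from dcm.svg import SVGConfig

# load the bundled example competence trees (see DynamicCompetenceMap.get_examples above)
examples = DynamicCompetenceMap.get_examples(content_class=CompetenceTree, markup="json")
dcm = next(iter(examples.values()))           # pick any example map

chart = DcmChart(dcm)
svg_markup = chart.generate_svg(
    filename="/tmp/competence_map.svg",       # optional: also writes the markup to disk
    config=SVGConfig(),                       # default 600x600 drawing area plus a 150px legend
)
print(svg_markup[:120])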
fortelex/hiveline
hiveline/results/trace_plotter.py
[ { "identifier": "Place", "path": "hiveline/od/place.py", "snippet": "class Place():\n\n def __init__(self, place_name: str, year: str):\n '''\n Initialize the place object, load geographical shape and tiling\n Args:\n place_name (str): the place name (ex: 'Konstanz, Germany')\n year (str): the study year\n '''\n self.name = place_name\n self.year = year\n self.shape = ox.geocode_to_gdf(self.name)\n self.bbox = self.shape.envelope[0]\n self.get_tiles()\n self.zones = {}\n # this GeoDataFrame will store the origin destination stats\n self.data = self.tiles.copy()\n # mongo\n self.mongo_db = mongo.get_database()\n self.load_regions()\n \n def merge_places(self, new_name, place_names):\n '''\n Extend the current place with other places (reset place data)\n Args:\n new_name (str): the new place name\n place_names (list of str): the list of place names to add\n '''\n for p in place_names:\n other_place = Place(p, self.year)\n self.tiles = pd.concat([self.tiles, other_place.tiles], ignore_index=True)\n\n self.data = self.tiles.copy()\n self.name = new_name\n self.load_regions()\n\n def get_tiles(self, h3_resolution=8):\n '''\n Compute H3 tiling and select the tiles covering the place shape\n Args:\n h3_resolution (int, default=8): tiling resolution\n '''\n # Create an empty dataframe to write data into\n self.tiles = gpd.GeoDataFrame([], columns=['h3', 'geometry'])\n\n multipolygon = self.shape['geometry'][0]\n # multipolygon to list of polygons\n if multipolygon.geom_type == 'MultiPolygon':\n poly_list = [shapely.geometry.Polygon(poly.exterior.coords).__geo_interface__ for poly in multipolygon.geoms]\n elif multipolygon.geom_type == 'Polygon':\n poly_list = [multipolygon.__geo_interface__]\n else:\n raise Exception('city shape is neither a Polygon nor a MultiPolygon')\n \n for poly_geojson in poly_list:\n # Fill the dictionary with Resolution 8 H3 Hexagons\n h3_hexes = h3.polyfill_geojson(poly_geojson, h3_resolution)\n for h3_hex in h3_hexes:\n h3_geo_boundary = shapely.geometry.Polygon(\n h3.h3_to_geo_boundary(h3_hex, geo_json=True)\n )\n # Append results to dataframe\n self.tiles.loc[len(self.tiles)] = [\n h3_hex,\n h3_geo_boundary,\n ]\n # set coordinates reference system\n if self.tiles.crs == None:\n self.tiles = self.tiles.set_crs(self.shape.crs)\n\n # ensure h3 is int64\n self.tiles['h3'] = self.tiles['h3'].astype('int64')\n\n def merge_to_data(self, gdf):\n '''\n Update (or add) a new field to the data gdf\n Args:\n gdf (GeoDataFrame): the gdf to merge, must contains an 'h3' column\n '''\n if ('geometry' in gdf.columns):\n gdf = gdf.drop(columns='geometry')\n # remove field if already existing\n for field in gdf.drop(columns='h3').columns:\n if field in self.data.columns:\n self.data = self.data.drop(columns=field)\n # merge to data gdf\n self.data = self.data.merge(gdf, on='h3', how='left')\n\n def mongo_cached(collection, match_field_list, fields, extra_transformation=lambda x:x):\n '''\n Decorator to check if data is available in mongo instead of computing it\n (acts like a cache)\n Args:\n loading_function (function): function that loads data from file, outputs a DataFrame or GeoDataFrame\n collection (str): mongo db collection to search in\n match_field_list (dict): the dataframe field name to match with the mongodb field, ex: ['nuts3', 'nuts-3']\n fields (list of str): list of fields to retrieve from mongodb\n extra_transformation (function, default is identity): transform the df coming from mongo\n '''\n # 2 wrappers are needed to pass arguments to the decorator\n def 
wrapper1(loading_function):\n def wrapper2(self):\n # add year prefix to fields to retrieve\n fields_year = [self.year+'.'+f if f not in ['_id', 'nuts-3', 'shape'] else f for f in fields] \n # search fields in mongo, only for place regions\n match_ids = self.data[match_field_list[0]].to_list()\n result_df = mongo.search(self.mongo_db, collection, match_field_list[1], match_ids, fields_year)\n # call loading function if the search result is empty or incomplete\n if result_df.empty or len(result_df.columns)<2 or len(result_df) < len(match_ids):\n print('Data not in db, computing')\n # split in chunks that can be computed in one go\n chunk_size=300\n tiles_backup = self.tiles.copy()\n data_df = pd.DataFrame()\n for i in range(0, len(self.tiles), chunk_size):\n print('chunk', int(i/chunk_size))\n self.tiles = tiles_backup[i:i+chunk_size]\n chunk_df = loading_function(self)\n data_df = pd.concat([data_df, chunk_df])\n del chunk_df\n self.tiles = tiles_backup.copy()\n else:\n print('Data found in db')\n data_df = extra_transformation(result_df)\n # merge the data to local df\n self.merge_to_data(data_df)\n return wrapper2\n return wrapper1\n \n @mongo_cached(collection='tiles', match_field_list=['nuts3', 'nuts-3'], fields=['population'], extra_transformation=mongo.transform_from_mongo_extract_year)\n def load_population(self, median_imputation=True, gpkg_path=data_folder+'population_density/kontur_population_20231101.gpkg'):\n '''\n Load the population data in a GeoDataFrame and add it to self.data\n Args:\n median_imputation (boolean, default=True): whether or not to replace missing values with the median\n gpkg_path (str, default): the path to the gpkg data\n '''\n population_gdf = gpd.read_file(gpkg_path, bbox=self.shape)\n # string_to_h3 needed for h3.api.numpy_int (faster)\n population_gdf['h3'] = population_gdf['h3'].apply(h3.string_to_h3)\n\n # ensure h3 is int64\n population_gdf['h3'] = population_gdf['h3'].astype('int64')\n\n population_gdf = population_gdf[population_gdf['h3'].isin(\n self.tiles['h3'])]\n population_gdf = population_gdf.to_crs(self.shape.crs)\n\n # median imputation for missing values\n if median_imputation:\n no_data = self.tiles[~self.tiles['h3'].isin(\n population_gdf['h3'])].copy()\n no_data['population'] = population_gdf['population'].median()\n\n population_gdf = pd.concat([population_gdf, no_data])\n\n return population_gdf\n\n def plot_population(self):\n '''\n Plot the shape and the population density overlay\n '''\n if not 'population' in self.data.columns:\n print('loading population data')\n self.load_population()\n ax = self.shape.plot(color='white')\n ax.set_axis_off()\n self.data.plot(ax=ax, zorder=1, column='population')\n\n def get_zoning(self, multipolygon):\n '''\n Get zoning data from Open Street Map\n '''\n self.zones = {\n 'work_agricultural': ox.features_from_polygon(multipolygon, work_agricultural_tags),\n 'work_industrial': ox.features_from_polygon(multipolygon, work_industrial_tags),\n 'work_commercial': ox.features_from_polygon(multipolygon, work_commercial_tags),\n 'work_office': ox.features_from_polygon(multipolygon, work_office_tags),\n 'work_social': ox.features_from_polygon(multipolygon, work_social_tags),\n 'education': ox.features_from_polygon(multipolygon, education_tags),\n 'leisure': ox.features_from_polygon(multipolygon, leisure_tags),\n 'empty': ox.features_from_polygon(multipolygon, empty_tags),\n }\n\n # keep only points for office as the polygons are badly distributed\n self.zones['work_office'] = 
only_geo_points(self.zones['work_office'])\n\n # keep only polygons for buildings and industrial landuse due to significant overlap between points and buildings\n self.zones['work_industrial'] = only_geo_polygons(self.zones['work_industrial'])\n\n def get_zoning_noparkingland(self, multipolygon):\n '''\n Get zoning data from Open Street Map for no parking land\n '''\n self.zones['no_parking_land'] = ox.features_from_polygon(multipolygon, parking_tags)\n # keep only polygons for buildings and industrial landuse due to significant overlap between points and buildings\n self.zones['no_parking_land'] = only_geo_polygons(self.zones['no_parking_land'])\n \n def get_zoning_buildings(self, multipolygon, batch_nb):\n '''\n Get zoning data from Open Street Map for buildings\n Args:\n batch_nb (str): '1' or '2', the batch to get\n '''\n self.zones['buildings'+batch_nb] = ox.features_from_polygon(multipolygon, building_tags[batch_nb])\n # keep only polygons for buildings and industrial landuse due to significant overlap between points and buildings\n self.zones['buildings'+batch_nb] = only_geo_polygons(self.zones['buildings'+batch_nb])\n\n\n @mongo_cached(collection='tiles', match_field_list=['nuts3', 'nuts-3'], fields=['education', 'leisure', 'empty', 'work', 'building_density'], extra_transformation=mongo.transform_tiles_from_mongo)\n def load_zoning_data(self):\n '''\n Load the zoning data into the data gdf\n Measure the areas of zones of interest (work, education, leisure,...) within each tile\n '''\n # get all the tiles (in current chunk) geometries to a single multipolygon\n multipolygon = shapely.geometry.MultiPolygon(self.tiles['geometry'].to_list())\n # merge intersecting polygons\n multipolygon = multipolygon.buffer(0)\n self.get_zoning(multipolygon)\n self.get_zoning_noparkingland(multipolygon)\n self.get_zoning_buildings(multipolygon, '1')\n self.get_zoning_buildings(multipolygon, '2')\n destination = self.tiles.copy()\n\n # area of a whole single hexagonal tile\n tile_area = self.tiles.to_crs(epsg=6933).head(1)['geometry'].area.item()\n\n for i, tile in destination.iterrows():\n for interest in self.zones.keys():\n # clip zones by hex tile\n if not self.zones[interest].empty:\n local_zoi = gpd.clip(self.zones[interest], tile['geometry']).copy() # zoi = zones of interest\n else:\n local_zoi = gpd.GeoDataFrame()\n # compute interest area in tile\n area = 0\n nb_points = 0\n if len(local_zoi) != 0:\n # replace single points with a defined area\n nb_points = len(only_geo_points(local_zoi))\n area = local_zoi.to_crs(epsg=6933).area.sum()\n destination.loc[i, interest] = area + nb_points * point_area\n # default work rate for non empty area, disabled for now\n # if interest == 'empty':\n # destination.loc[i, 'work'] += (tile_area-area) * \\\n # default_work_coefficient\n\n # combine all work zones into one\n work_zones = [k for k in self.zones.keys() if 'work' in k]\n destination['work'] = destination[work_zones].sum(axis=1)\n # calculate building density for parking\n destination['building_density'] = (destination['buildings1'] + destination['buildings2'] + destination['no_parking_land']) / tile_area\n destination = destination.drop(columns=['buildings1', 'buildings2', 'no_parking_land'])\n return destination\n \n @mongo_cached(collection='tiles', match_field_list=['nuts3', 'nuts-3'], fields=['parking'], extra_transformation=mongo.transform_tiles_from_mongo)\n def load_parking_data(self):\n '''\n Approximate parking probabilities based on building density and input variables \n '''\n 
tiles_filter = self.data['h3'].isin(self.tiles['h3'])\n destination = self.data.loc[tiles_filter, ['h3', 'building_density']].copy()\n \n # get global parking variables\n prkg_locations = parking_prob.keys()\n prkg_vehicles = parking_prob['destination'].keys()\n\n # calculate parking probabilities for each tile\n for i, tile in destination.iterrows():\n dsty = tile['building_density']\n for p in prkg_locations:\n for v in prkg_vehicles:\n min_prob_bldg_dsty = parking_prob[p][v]['min_prob_bldg_dsty']\n min_prob = parking_prob[p][v]['min_prob']\n max_prob_bldg_dsty = parking_prob[p][v]['max_prob_bldg_dsty']\n max_prob = parking_prob[p][v]['max_prob']\n if dsty >= min_prob_bldg_dsty:\n prob = min_prob\n elif dsty <= max_prob_bldg_dsty:\n prob = max_prob\n else: # min_prob_bldg_dsty > dsty > max_prob_bldg_dsty\n prob = np.round( max_prob - (max_prob - min_prob) * (dsty - max_prob_bldg_dsty)/(min_prob_bldg_dsty - max_prob_bldg_dsty), 4)\n # add columns to destination dataframe\n destination.loc[i,f'parking_{p}_{v}'] = prob\n destination = destination.drop(columns='building_density') # already in the data\n return destination\n\n @mongo_cached(collection='tiles', match_field_list=['h3', '_id'], fields=['nuts-3'])\n def load_regions(self, nuts_file=data_folder+'nuts/NUTS_RG_01M_2021_4326.geojson'):\n '''\n Get the region of each tile (NUTS 3), and load it to the data\n Args:\n nuts_file (str, default): the geojson file containing the official NUTS European regions\n '''\n nuts = gpd.read_file(nuts_file)\n # keep only the most precise level as it contains the other\n nuts3 = nuts[nuts['LEVL_CODE'] == 3][['id', 'geometry']].reset_index(drop=True)\n del nuts\n # nuts regions that intersects with the city (not overlaps)\n place_regions = nuts3.loc[nuts3.intersects(self.shape['geometry'][0]), ['id', 'geometry']]\n place_regions = place_regions.reset_index(drop=True)\n # due to precision differences, the city is overlapping with several regions instead of one\n # regions are defined according to cities boundaries so there should be one region assigned to a city\n # however, a tiled place can span across different regions.\n regions = self.tiles.copy()\n regions['nuts3'] = ''\n # for each tile, compute the intersection area with the regions and keep the largest\n for i, tile in regions.iterrows():\n # check if it intersects before computing the intersection area (otherwise there is a warning)\n intersect = place_regions.intersects(tile['geometry'])\n best_matching_index = place_regions[intersect].intersection(tile['geometry']).to_crs(epsg=6933).area\n if best_matching_index.empty:\n best_matching_index = 0\n else:\n best_matching_index = best_matching_index.argmax()\n regions.loc[i, 'nuts3'] = place_regions.iloc[best_matching_index]['id']\n\n return regions\n \n def load_all(self):\n '''\n Load all the data\n '''\n self.load_population()\n self.load_zoning_data()\n self.load_parking_data()\n\n self.export_place_to_mongo()\n self.export_tiles_to_mongo()\n\n def plot_zoning(self, columns=['population', 'work', 'education', 'leisure'], save_name='filename'):\n '''\n Plot one or several zoning data\n Args:\n columns (list of str): list of columns to plot\n save (str): name of the file to save, the path and city name is automatically added\n '''\n assert len(columns) > 0, 'At least one column is required.'\n for c in columns:\n assert c in self.data.columns, f'The column {c} does not exists in the loaded data.'\n\n nfig = len(columns)\n ncols = (nfig+1)//2\n nrows = 1 if nfig == 1 else 2\n figsize = 
(3.5*ncols, 3.5*nrows)\n\n fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize)\n for i, c in enumerate(columns):\n if nfig == 1:\n ax = axes\n elif nfig == 2:\n ax = axes[i % 2]\n else:\n ax = axes[i % 2, i//2]\n # add city boundaries\n self.shape.boundary.plot(ax=ax)\n # add column data\n self.data.plot(ax=ax, column=c, colormap='magma')\n ax.set_title(c)\n ax.set_axis_off()\n\n # don't show axis for last subplot\n if nfig > 1 and nfig % 2 == 1:\n axes[1, ncols-1].set_axis_off()\n\n # Display the subplots\n fig.suptitle(self.name)\n if save_name:\n city_name = self.name.split(',')[0]\n plt.savefig(\n data_folder+f'visualization/zoning/{save_name}_{city_name}.png', dpi=300)\n plt.show()\n\n def export_place_to_mongo(self):\n '''\n Push the place data to mongodb\n '''\n n = self.name.split(', ')\n data = {\n 'name': n[0],\n 'country': n[1],\n 'shape': str(self.shape['geometry'][0]),\n 'bbox': str(self.bbox),\n 'tiles': self.tiles['h3'].to_list(),\n 'nuts-3': self.data['nuts3'].unique().tolist(),\n }\n self.mongo_db['places'].update_one({'name': data['name'], 'country': data['country']}, {'$set': data}, upsert=True)\n\n def export_tiles_to_mongo(self):\n '''\n Push the tiles and zoning data to mongodb\n '''\n id_df = self.data[['h3', 'nuts3', 'geometry']].copy()\n id_df['geometry'] = id_df['geometry'].astype(str)\n id_df = id_df.rename(columns={'h3': '_id', 'nuts3': 'nuts-3', 'geometry':'shape'})\n id_array = mongo.df_to_dict(id_df)\n data_df = self.data[['population', 'education', 'leisure', 'empty']].copy()\n data_array = mongo.df_to_dict(data_df)\n for prefix in ['work', 'parking']:\n prefix_df = self.data[[c for c in self.data.columns if prefix in c]].copy()\n if prefix=='work':\n prefix_df = prefix_df.rename(columns={prefix:'total'})\n prefix_array = mongo.df_to_dict(prefix_df)\n # remove prefix\n prefix_array = [{k.replace(prefix+'_', ''):v for k,v in d.items()} for d in prefix_array]\n # merge work with other data\n [d.update({prefix: prefix_array[i]}) for i, d in enumerate(data_array)]\n # add ids and year\n data_array_export = []\n for i, d in enumerate(data_array):\n ids = id_array[i]\n ids.update({self.year: d})\n data_array_export.append(ids)\n # push\n mongo.push_to_collection(self.mongo_db, 'tiles', data_array_export)" }, { "identifier": "CityPlotter", "path": "hiveline/plotting/map.py", "snippet": "class CityPlotter():\n def __init__(self, city, zoom=13):\n self.city = city\n # if len(city.data.columns) <= 3:\n # self.city.load_all()\n self.centroid = self.get_centroid() # [48.857003, 2.3492646]\n self.map = self.get_map(zoom)\n\n def get_centroid(self):\n proj_shape = self.city.shape.to_crs('EPSG:3857')\n centroid = proj_shape.centroid\n c = centroid.to_crs(self.city.shape.crs)\n return [c.y, c.x]\n\n def get_map(self, zoom, dark=True):\n mapstyle = 'cartodbdark_matter' if dark else 'CartoDB positron'\n m = folium.Map(location=self.centroid, zoom_start=zoom, tiles=mapstyle, zoom_control=False)\n return m\n\n def add_city_shape(self, color=\"white\", opacity=0.6, weight=3, dash_array='0, 0'):\n shape = self.city.shape.boundary.__geo_interface__\n shape = folium.GeoJson(data=shape,\n style_function=lambda feature: dict(color=color, weight=weight, opacity=opacity,\n dashArray=dash_array))\n shape.add_to(self.map)\n\n def add_hex_heatmap(self, column):\n max_val = self.city.data[column].max()\n\n # add each tile to the map with the corresponding color\n for _, tile in self.city.data.iterrows():\n geo_j = tile[\"geometry\"].__geo_interface__\n geo_j['color'] = 
get_mpl_color(tile[column] / max_val)\n geo_j = folium.GeoJson(data=geo_j, style_function=style_heatmap)\n geo_j.add_to(self.map)\n\n def add_custom_hex_heatmap(self, data):\n \"\"\"\n Add a custom heatmap to the map. For adding city input data, use add_hex_heatmap instead.\n :param data: a dictionary with the h3 hexagon id as key and the heat value as value\n :return:\n \"\"\"\n df = pd.DataFrame([\n {\"hexagon\": hexagon, \"count\": count, \"geometry\": _hexagon_to_polygon(hexagon)}\n for hexagon, count in data.items()\n ])\n\n maximum = df['count'].max()\n minimum = df['count'].min()\n\n # Define a color scale\n # linear = cm.LinearColormap(colors=['#00ccff', '#cc6600'], index=[0, 1], vmin=0, vmax=1)\n linear = cm.get_cmap(\"viridis\")\n opacity = 1\n\n # Add Hexagons to the map\n for _, row in df.iterrows():\n val = (row['count'] - minimum) / (maximum - minimum)\n color = matplotlib.colors.rgb2hex(linear(val))\n folium.Polygon(\n locations=row['geometry'],\n fill=True,\n fill_color=color,\n color=color,\n weight=1,\n fill_opacity=opacity,\n opacity=opacity,\n tooltip=f\"{row['count']} trace points\"\n ).add_to(self.map)\n\n def show_map(self):\n return display(self.map)\n\n def setup_webdriver(self):\n \"\"\"\n Creates a new headless chrome webdriver instance\n :return: webdriver instance\n \"\"\"\n options = webdriver.ChromeOptions()\n # do not show chrome\n options.add_argument(\"--headless\")\n driver = webdriver.Chrome(options=options)\n return driver\n\n def style_function(self, color, weight=2, opacity=0.75, dash_array='0, 0'):\n return dict(color=color, weight=weight, opacity=opacity, dashArray=dash_array)\n\n def get_style_function(self, color, weight=2, opacity=0.75, dash_array='0, 0'):\n return lambda feature: self.style_function(color, weight, opacity, dash_array)\n\n def add_traces(self, traces: dict[fptf.Mode, list[LineString]], weight=2,\n opacity=0.75, dash_array='0, 0'):\n \"\"\"\n Add traces to the map\n :param traces: list of trace objects. 
each trace object is a dict with keys: tdf, color where tdf is a\n trajectory and color is a hex color string\n :param weight: weight of the trace line\n :param opacity: opacity of the trace line\n :param dash_array: dash array of the trace line\n :return:\n \"\"\"\n color_map = {\n fptf.Mode.WALKING: \"#D280CE\",\n fptf.Mode.CAR: \"#FE5F55\",\n fptf.Mode.BUS: \"#F0B67F\",\n fptf.Mode.TRAIN: \"#F7F4D3\",\n fptf.Mode.GONDOLA: \"#F7F4D3\",\n fptf.Mode.WATERCRAFT: \"#F7F4D3\"\n }\n\n draw_order = [fptf.Mode.WALKING, fptf.Mode.CAR, fptf.Mode.BUS, fptf.Mode.TRAIN, fptf.Mode.GONDOLA,\n fptf.Mode.WATERCRAFT]\n\n for mode in draw_order:\n color = color_map[mode]\n\n for line in traces.get(mode, []):\n tgeojson = folium.GeoJson(line,\n name='tgeojson',\n style_function=self.get_style_function(color, weight, opacity, dash_array)\n )\n tgeojson.add_to(self.map)\n\n def export_to_png(self, folder='images/', filename='image', tall_city=False, webdriver=None):\n if webdriver is None:\n webdriver = self.setup_webdriver()\n\n if not folder.endswith(\"/\"):\n folder += \"/\"\n if not os.path.exists(PROJECT_PATH + 'visualization/' + folder):\n os.makedirs(PROJECT_PATH + 'visualization/' + folder)\n\n filepath = PROJECT_PATH + 'visualization/' + folder + filename\n filepath_html = filepath + '.html'\n self.map.save(filepath_html)\n # image resolution\n ratio = 1920 / 1080\n height = 1200 if tall_city else 1080\n width = height * ratio\n webdriver.set_window_size(width, height)\n webdriver.get(\"file:///\" + filepath + '.html')\n time.sleep(0.2)\n webdriver.save_screenshot(filepath + '.png')\n if os.path.exists(filepath_html):\n os.remove(filepath_html)\n\n return filepath + \".png\"" }, { "identifier": "get_line_traces_by_mode", "path": "hiveline/plotting/map.py", "snippet": "def get_line_traces_by_mode(traces: list[list[tuple[tuple[float, float], datetime.datetime, fptf.Mode, bool]]]) -> dict[\n fptf.Mode, list[LineString]]:\n mode_trace_lists = [_extract_mode_traces(trace) for trace in traces]\n mode_traces = [item for sublist in mode_trace_lists for item in sublist]\n\n # group traces by mode\n traces_by_mode = {}\n\n for line, mode in mode_traces:\n if mode not in traces_by_mode:\n traces_by_mode[mode] = []\n traces_by_mode[mode].append(line)\n\n return traces_by_mode" }, { "identifier": "add_line_traces", "path": "hiveline/plotting/map.py", "snippet": "def add_line_traces(a: dict[fptf.Mode, list[LineString]], b: dict[fptf.Mode, list[LineString]]) -> dict[\n fptf.Mode, list[LineString]]:\n for mode, traces in b.items():\n if mode not in a:\n a[mode] = []\n a[mode].extend(traces)\n\n return a" }, { "identifier": "Journeys", "path": "hiveline/results/journeys.py", "snippet": "class Journeys:\n def __init__(self, sim_id: str, db=None, use_cache=True, cache=\"./cache\"):\n if db is None:\n db = get_database()\n self.db = db\n self.sim_id = sim_id\n if cache.endswith(\"/\"):\n cache = cache[:-1]\n self.use_cache = use_cache\n self.cache = cache + \"/hiveline-journeys\"\n ensure_directory(self.cache)\n\n self.options = self.__find_all()\n\n def __find_all(self):\n # check if cached\n if self.use_cache and os.path.isfile(self.cache + \"/\" + self.sim_id + \".json\"):\n print(\"Found cached results\")\n return self.__load_cache()\n\n t = datetime.datetime.now()\n\n results = list(self.db[\"route-results\"].find({\"sim-id\": self.sim_id}))\n\n print(f\"Found {len(results)} results in {datetime.datetime.now() - t}\")\n t = datetime.datetime.now()\n\n options = [Options(r) for r in results]\n\n 
self.__save_cache(options)\n\n print(f\"Converted {len(options)} results in {datetime.datetime.now() - t}\")\n\n return options\n\n def iterate(self) -> Generator[Options, None, None]:\n for o in self.options:\n yield o\n\n def iterate_selection(self, selection: list[str | None]) -> Generator[Option, None, None]:\n for (i, sel) in enumerate(selection):\n if sel is not None:\n yield self.options[i].get_option(sel)\n\n def iterate_traces(self, selection=None) -> Generator[list[tuple[tuple[float, float], datetime.datetime, fptf.Mode, bool]], None, None]:\n for (i, o) in enumerate(self.options):\n if selection is not None and i >= len(selection):\n break\n\n opt = o.options\n if selection is not None:\n option = o.get_option(selection[i])\n if option is None:\n continue\n opt = [option]\n\n for option in opt:\n yield option.get_trace()\n\n def get_selection(self, decision: Callable[[Options], Option | None], max_count=None) -> list[str | None]:\n \"\"\"\n Get the selection of options based on a decision function\n :param decision: the decision function\n :param max_count: (optional) the maximum number of options to return\n :return:\n \"\"\"\n options = self.options\n if max_count is not None:\n options = options[:max_count]\n\n decided = [decision(o) for o in options]\n return [o.id if o is not None else None for o in decided]\n\n def __load_cache(self):\n with open(self.cache + \"/\" + self.sim_id + \".json\", \"r\") as f:\n return [Options(o) for o in json.load(f)]\n\n def __save_cache(self, options: list[Options]):\n with open(self.cache + \"/\" + self.sim_id + \".json\", \"w\") as f:\n json.dump([o.to_dict() for o in options], f)\n\n def prepare_traces(self):\n \"\"\"\n Prepare the traces for all options\n :return:\n \"\"\"\n for o in self.options:\n for option in o.options:\n option.get_trace()" }, { "identifier": "decide", "path": "hiveline/results/modal_shares.py", "snippet": "def decide(options: Options, params: Params = None) -> Option | None:\n \"\"\"\n Decide on a route option\n :param options: the route options\n :param params: the simulation parameters\n :return: the chosen route option\n \"\"\"\n if params is None:\n params = Params()\n\n would_use_car = vc_extract.would_use_motorized_vehicle(\n options.traveller.to_dict()) # would the vc use a motorized vehicle?\n\n has_car = vc_extract.has_motor_vehicle(options.traveller.to_dict()) # does the vc have a motorized vehicle?\n\n if not has_car and random.random() < params.car_ownership_override:\n has_car = True\n would_use_car = True\n\n if not would_use_car and has_car and random.random() < params.car_usage_override:\n would_use_car = True\n\n valid_options = options.options\n\n if not would_use_car:\n valid_options = [o for o in valid_options if not o.has_car()]\n\n if len(valid_options) == 0:\n return None\n\n durations = [o.journey.duration() for o in valid_options]\n durations = [d if d is not None else 0 for d in durations]\n\n return valid_options[durations.index(min(durations))] # choose the fastest option" }, { "identifier": "Params", "path": "hiveline/results/modal_shares.py", "snippet": "class Params:\n \"\"\"\n Simulation parameters for congestion and modal share analysis\n \"\"\"\n num_citizens = 2000000\n vehicle_factor = 0.00007\n vcs_car_usage_start = 0.5\n mix_factor = 0.1\n max_iterations = 100\n car_ownership_override = 0 # probability that a vc will own a car even though they don't have one. 
all of these would use it as well.\n car_usage_override = 0 # probability that a car owner would choose a car even though there is no parking" } ]
from hiveline.od.place import Place
from hiveline.plotting.map import CityPlotter, get_line_traces_by_mode, add_line_traces
from hiveline.results.journeys import Journeys
from hiveline.results.modal_shares import decide, Params
8,279
def _prepare_traces(journeys: Journeys, only_use_selected=True):
    selection: list[str] | None = None
    if only_use_selected:
        selection = journeys.get_selection(lambda options: decide(options, Params()))
    print("Extracting traces...")
    return [trace for trace in journeys.iterate_traces(selection)]


def plot_trace_animation(journeys: Journeys, only_use_selected=True, zoom_level=13, tall_city=False, fps=30, duration=30):
    raw_traces = _prepare_traces(journeys, only_use_selected=only_use_selected)
    print("Plotting traces...")
    total_frames = fps * duration
    num_to_plot = 0
    num_step = int(len(raw_traces) / total_frames)
    plotter = CityPlotter(place, zoom=zoom_level)
    webdriver = plotter.setup_webdriver()
    traces = {}
    for i in range(total_frames):
        print(f"Frame {i} of {total_frames}")
        raw_to_add = raw_traces[num_to_plot:num_to_plot + num_step]
def _prepare_traces(journeys: Journeys, only_use_selected=True):
    selection: list[str] | None = None
    if only_use_selected:
        selection = journeys.get_selection(lambda options: decide(options, Params()))
    print("Extracting traces...")
    return [trace for trace in journeys.iterate_traces(selection)]


def plot_trace_animation(journeys: Journeys, only_use_selected=True, zoom_level=13, tall_city=False, fps=30, duration=30):
    raw_traces = _prepare_traces(journeys, only_use_selected=only_use_selected)
    print("Plotting traces...")
    total_frames = fps * duration
    num_to_plot = 0
    num_step = int(len(raw_traces) / total_frames)
    plotter = CityPlotter(place, zoom=zoom_level)
    webdriver = plotter.setup_webdriver()
    traces = {}
    for i in range(total_frames):
        print(f"Frame {i} of {total_frames}")
        raw_to_add = raw_traces[num_to_plot:num_to_plot + num_step]
traces_to_add = get_line_traces_by_mode(raw_to_add)
2
2023-11-07 15:34:04+00:00
12k
uhppoted/uhppoted-app-home-assistant
custom_components/uhppoted/config_flow.py
[ { "identifier": "DOMAIN", "path": "custom_components/uhppoted/const.py", "snippet": "DOMAIN = 'uhppoted'" }, { "identifier": "CONF_BIND_ADDR", "path": "custom_components/uhppoted/const.py", "snippet": "CONF_BIND_ADDR = 'bind_address'" }, { "identifier": "CONF_BROADCAST_ADDR", "path": "custom_components/uhppoted/const.py", "snippet": "CONF_BROADCAST_ADDR = 'broadcast_address'" }, { "identifier": "CONF_LISTEN_ADDR", "path": "custom_components/uhppoted/const.py", "snippet": "CONF_LISTEN_ADDR = 'listen_address'" }, { "identifier": "CONF_DEBUG", "path": "custom_components/uhppoted/const.py", "snippet": "CONF_DEBUG = 'debug'" }, { "identifier": "CONF_CONTROLLERS", "path": "custom_components/uhppoted/const.py", "snippet": "CONF_CONTROLLERS = 'controllers'" }, { "identifier": "CONF_CONTROLLER_ID", "path": "custom_components/uhppoted/const.py", "snippet": "CONF_CONTROLLER_ID = 'controller_id'" }, { "identifier": "CONF_CONTROLLER_SERIAL_NUMBER", "path": "custom_components/uhppoted/const.py", "snippet": "CONF_CONTROLLER_SERIAL_NUMBER = 'controller_serial_number'" }, { "identifier": "CONF_CONTROLLER_ADDR", "path": "custom_components/uhppoted/const.py", "snippet": "CONF_CONTROLLER_ADDR = 'controller_address'" }, { "identifier": "CONF_CONTROLLER_TIMEZONE", "path": "custom_components/uhppoted/const.py", "snippet": "CONF_CONTROLLER_TIMEZONE = 'controller_timezone'" }, { "identifier": "CONF_DOORS", "path": "custom_components/uhppoted/const.py", "snippet": "CONF_DOORS = 'doors'" }, { "identifier": "CONF_DOOR_ID", "path": "custom_components/uhppoted/const.py", "snippet": "CONF_DOOR_ID = 'door_id'" }, { "identifier": "CONF_DOOR_CONTROLLER", "path": "custom_components/uhppoted/const.py", "snippet": "CONF_DOOR_CONTROLLER = 'door_controller'" }, { "identifier": "CONF_DOOR_NUMBER", "path": "custom_components/uhppoted/const.py", "snippet": "CONF_DOOR_NUMBER = 'door_number'" }, { "identifier": "CONF_CARDS", "path": "custom_components/uhppoted/const.py", "snippet": "CONF_CARDS = 'cards'" }, { "identifier": "CONF_CARD_UNIQUE_ID", "path": "custom_components/uhppoted/const.py", "snippet": "CONF_CARD_UNIQUE_ID = 'card_unique_id'" }, { "identifier": "CONF_CARD_NUMBER", "path": "custom_components/uhppoted/const.py", "snippet": "CONF_CARD_NUMBER = 'card_number'" }, { "identifier": "CONF_CARD_NAME", "path": "custom_components/uhppoted/const.py", "snippet": "CONF_CARD_NAME = 'card_name'" }, { "identifier": "CONF_CARD_STARTDATE", "path": "custom_components/uhppoted/const.py", "snippet": "CONF_CARD_STARTDATE = 'card_startdate'" }, { "identifier": "CONF_CARD_ENDDATE", "path": "custom_components/uhppoted/const.py", "snippet": "CONF_CARD_ENDDATE = 'card_enddate'" }, { "identifier": "CONF_CARD_DOORS", "path": "custom_components/uhppoted/const.py", "snippet": "CONF_CARD_DOORS = 'card_doors'" }, { "identifier": "DEFAULT_CONTROLLER_ID", "path": "custom_components/uhppoted/const.py", "snippet": "DEFAULT_CONTROLLER_ID = 'Alpha'" }, { "identifier": "DEFAULT_CONTROLLER_ADDR", "path": "custom_components/uhppoted/const.py", "snippet": "DEFAULT_CONTROLLER_ADDR = '192.168.1.100'" }, { "identifier": "DEFAULT_CONTROLLER_TIMEZONE", "path": "custom_components/uhppoted/const.py", "snippet": "DEFAULT_CONTROLLER_TIMEZONE = 'LOCAL'" }, { "identifier": "DEFAULT_DOOR1", "path": "custom_components/uhppoted/const.py", "snippet": "DEFAULT_DOOR1 = 'Gryffindor'" }, { "identifier": "DEFAULT_DOOR2", "path": "custom_components/uhppoted/const.py", "snippet": "DEFAULT_DOOR2 = 'Ravenclaw'" }, { "identifier": "DEFAULT_DOOR3", "path": 
"custom_components/uhppoted/const.py", "snippet": "DEFAULT_DOOR3 = 'Hufflepuff'" }, { "identifier": "DEFAULT_DOOR4", "path": "custom_components/uhppoted/const.py", "snippet": "DEFAULT_DOOR4 = 'Slytherin'" }, { "identifier": "UhppotedOptionsFlow", "path": "custom_components/uhppoted/options_flow.py", "snippet": "class UhppotedOptionsFlow(OptionsFlow):\n\n def __init__(self, entry: ConfigEntry) -> None:\n self.config_entry = entry\n self.data = dict(entry.data)\n # self.options = dict(entry.options)\n self.options = copy.deepcopy(dict(entry.options))\n self.controllers = []\n self.doors = []\n self.configuration = {'doors': []}\n\n async def async_step_init(self, user_input: dict[str, Any] | None = None) -> FlowResult:\n return await self.async_step_IPv4()\n\n async def async_step_IPv4(self, user_input: Optional[Dict[str, Any]] = None):\n errors: Dict[str, str] = {}\n\n if user_input is not None:\n if not errors:\n self.options.update(user_input)\n return await self.async_step_controllers()\n\n bind = self.options[CONF_BIND_ADDR]\n broadcast = self.options[CONF_BROADCAST_ADDR]\n listen = self.options[CONF_LISTEN_ADDR]\n debug = self.options[CONF_DEBUG]\n\n schema = vol.Schema({\n vol.Optional(CONF_BIND_ADDR, default=bind): str,\n vol.Optional(CONF_BROADCAST_ADDR, default=broadcast): str,\n vol.Optional(CONF_LISTEN_ADDR, default=listen): str,\n vol.Optional(CONF_DEBUG, default=debug): bool,\n })\n\n return self.async_show_form(step_id=\"IPv4\", data_schema=schema, errors=errors)\n\n async def async_step_controllers(self, user_input: Optional[Dict[str, Any]] = None):\n\n def g(v):\n if self.options and CONF_CONTROLLERS in self.options:\n for c in self.options[CONF_CONTROLLERS]:\n if f'{c[CONF_CONTROLLER_SERIAL_NUMBER]}' == f'{v}':\n if c[CONF_CONTROLLER_ID] != '':\n return {\n 'label': f'{v} ({c[CONF_CONTROLLER_ID]})',\n 'value': f'{v}',\n }\n break\n return {\n 'label': f'{v}',\n 'value': f'{v}',\n }\n\n errors: Dict[str, str] = {}\n\n if user_input is not None:\n if not errors:\n for v in user_input[CONF_CONTROLLERS]:\n self.controllers.append({\n 'controller': {\n 'name': '',\n 'serial_no': v,\n 'configured': False,\n },\n 'doors': None,\n })\n\n return await self.async_step_controller()\n\n controllers = get_all_controllers(self.options)\n if len(controllers) < 1:\n return await self.async_step_door()\n\n configured = set()\n if self.options and CONF_CONTROLLERS in self.options:\n for v in self.options[CONF_CONTROLLERS]:\n configured.add(int(f'{v[CONF_CONTROLLER_SERIAL_NUMBER]}'))\n\n configured = sorted(list(configured), reverse=True)\n\n try:\n validate_all_controllers(self.options)\n except ValueError as err:\n errors['base'] = f'{err}'\n\n schema = vol.Schema({\n vol.Required(CONF_CONTROLLERS, default=[f'{v}' for v in configured]):\n SelectSelector(\n SelectSelectorConfig(options=[g(v) for v in controllers],\n multiple=True,\n custom_value=False,\n mode=SelectSelectorMode.LIST)),\n })\n\n return self.async_show_form(step_id=\"controllers\", data_schema=schema, errors=errors)\n\n async def async_step_controller(self, user_input: Optional[Dict[str, Any]] = None):\n it = next((v for v in self.controllers if not v['controller']['configured']), None)\n if it == None:\n try:\n validate_all_controllers(self.options)\n return await self.async_step_doors()\n except ValueError as err:\n return await self.async_step_controllers()\n else:\n controller = it['controller']\n serial_no = controller['serial_no']\n\n errors: Dict[str, str] = {}\n\n if user_input is not None:\n name = 
user_input[CONF_CONTROLLER_ID]\n address = user_input[CONF_CONTROLLER_ADDR]\n timezone = user_input[CONF_CONTROLLER_TIMEZONE]\n\n try:\n validate_controller_id(serial_no, name, None)\n except ValueError as err:\n errors[CONF_CONTROLLER_ID] = f'{err}'\n\n if not errors:\n controllers = self.options[CONF_CONTROLLERS]\n\n for v in self.options[CONF_CONTROLLERS]:\n if int(f'{v[CONF_CONTROLLER_SERIAL_NUMBER]}') == int(f'{serial_no}'):\n if user_input[CONF_CONTROLLER_ID].strip() == '-':\n controllers.remove(v)\n else:\n v[CONF_CONTROLLER_ID] = name\n v[CONF_CONTROLLER_SERIAL_NUMBER] = serial_no\n v[CONF_CONTROLLER_ADDR] = address\n v[CONF_CONTROLLER_TIMEZONE] = timezone\n break\n else:\n if user_input[CONF_CONTROLLER_ID].strip() != '-':\n controllers.append({\n CONF_CONTROLLER_ID: name,\n CONF_CONTROLLER_SERIAL_NUMBER: serial_no,\n CONF_CONTROLLER_ADDR: address,\n CONF_CONTROLLER_TIMEZONE: timezone,\n })\n\n self.options.update({CONF_CONTROLLERS: controllers})\n\n controller['name'] = user_input[CONF_CONTROLLER_ID]\n controller['configured'] = True\n\n return await self.async_step_controller()\n\n defaults = {\n CONF_CONTROLLER_ID: DEFAULT_CONTROLLER_ID,\n CONF_CONTROLLER_ADDR: DEFAULT_CONTROLLER_ADDR,\n CONF_CONTROLLER_TIMEZONE: DEFAULT_CONTROLLER_TIMEZONE,\n }\n\n if CONF_CONTROLLERS in self.options:\n for v in self.options[CONF_CONTROLLERS]:\n if int(f'{v[CONF_CONTROLLER_SERIAL_NUMBER]}') == int(f'{serial_no}'):\n for k in [CONF_CONTROLLER_ID, CONF_CONTROLLER_ADDR, CONF_CONTROLLER_TIMEZONE]:\n if k in v:\n defaults[k] = v[k]\n break\n\n if user_input is not None:\n for k in [CONF_CONTROLLER_ID, CONF_CONTROLLER_ADDR, CONF_CONTROLLER_TIMEZONE]:\n if k in user_input:\n defaults[k] = user_input[k]\n\n schema = vol.Schema({\n vol.Required(CONF_CONTROLLER_ID, default=defaults[CONF_CONTROLLER_ID]): str,\n vol.Optional(CONF_CONTROLLER_ADDR, default=defaults[CONF_CONTROLLER_ADDR]): str,\n vol.Optional(CONF_CONTROLLER_TIMEZONE, default=defaults[CONF_CONTROLLER_TIMEZONE]): str,\n })\n\n return self.async_show_form(step_id=\"controller\",\n data_schema=schema,\n errors=errors,\n description_placeholders={\n \"serial_no\": serial_no,\n })\n\n async def async_step_doors(self, user_input: Optional[Dict[str, Any]] = None):\n\n def f(v):\n return v[CONF_CONTROLLER_ID] in [u['controller'] for u in self.configuration['doors']]\n\n def g(d):\n door = d[CONF_DOOR_ID]\n no = d[CONF_DOOR_NUMBER]\n return {\n 'label': f'Door {no} ({door})' if door else f'Door {no}',\n 'value': f'{no}',\n }\n\n all_doors = get_all_doors(self.options)\n it = next((v for v in all_doors if not f(v)), None)\n if it == None:\n return await self.async_step_door()\n else:\n controller = it[CONF_CONTROLLER_ID]\n serial_no = it[CONF_CONTROLLER_SERIAL_NUMBER]\n doors = it['doors']\n\n errors: Dict[str, str] = {}\n try:\n validate_all_doors(self.options)\n except ValueError as err:\n errors['base'] = f'{err}'\n\n if user_input is not None:\n self.configuration['doors'].append({\n 'controller': controller,\n 'serial_no': serial_no,\n 'doors': [int(f'{v}') for v in user_input['doors']],\n 'configured': False,\n })\n\n return await self.async_step_doors()\n\n select = SelectSelectorConfig(options=[g(v) for v in doors],\n multiple=True,\n custom_value=False,\n mode=SelectSelectorMode.LIST) # yapf: disable\n\n schema = vol.Schema({\n vol.Required('doors', default=[f'{v[CONF_DOOR_NUMBER]}' for v in doors if v[CONF_DOOR_ID]]):\n SelectSelector(select),\n })\n\n placeholders = {\n 'controller': f'{controller}',\n 'serial_no': f'{serial_no}',\n }\n\n 
return self.async_show_form(step_id=\"doors\",\n data_schema=schema,\n errors=errors,\n description_placeholders=placeholders)\n\n async def async_step_door(self, user_input: Optional[Dict[str, Any]] = None):\n\n def f(v):\n return len(v['doors']) > 0 and not v['configured']\n\n it = next((v for v in self.configuration['doors'] if f(v)), None)\n if it == None:\n try:\n validate_all_doors(self.options)\n return await self.async_step_cards()\n except ValueError as err:\n self.configuration['doors'] = []\n\n return await self.async_step_doors()\n\n else:\n controller = it['controller']\n serial_no = it['serial_no']\n doors = it['doors']\n\n errors: Dict[str, str] = {}\n if user_input is not None:\n l = [user_input[f'door{v}_id'] for v in doors]\n for d in doors:\n try:\n k = f'door{d}_id'\n v = user_input[k]\n validate_door_id(v, None)\n validate_door_duplicates(v, l)\n except ValueError as err:\n errors[k] = f'{err}'\n\n if not errors:\n l = self.options[CONF_DOORS]\n\n for door in doors:\n k = f'door{door}_id'\n for d in l:\n if d[CONF_DOOR_CONTROLLER] == controller and f'{d[CONF_DOOR_NUMBER]}' == f'{door}':\n if user_input[k].strip() == '-':\n l.remove(d)\n else:\n d[CONF_DOOR_ID] = user_input[k]\n break\n else:\n if user_input[k].strip() != '-':\n l.append({\n CONF_DOOR_ID: user_input[k],\n CONF_DOOR_CONTROLLER: controller,\n CONF_DOOR_NUMBER: door,\n })\n\n self.options.update({CONF_DOORS: l})\n it['configured'] = True\n\n return await self.async_step_door()\n\n defaults = {\n 'door1_id': DEFAULT_DOOR1,\n 'door2_id': DEFAULT_DOOR2,\n 'door3_id': DEFAULT_DOOR3,\n 'door4_id': DEFAULT_DOOR4,\n }\n\n if user_input is not None:\n for v in ['door1_id', 'door2_id', 'door3_id', 'door4_id']:\n if k in user_input:\n defaults[k] = user_input[k]\n\n for v in self.options[CONF_DOORS]:\n if v[CONF_DOOR_CONTROLLER] == controller and v[CONF_DOOR_NUMBER] == 1:\n defaults['door1_id'] = v[CONF_DOOR_ID]\n\n if v[CONF_DOOR_CONTROLLER] == controller and v[CONF_DOOR_NUMBER] == 2:\n defaults['door2_id'] = v[CONF_DOOR_ID]\n\n if v[CONF_DOOR_CONTROLLER] == controller and v[CONF_DOOR_NUMBER] == 3:\n defaults['door3_id'] = v[CONF_DOOR_ID]\n\n if v[CONF_DOOR_CONTROLLER] == controller and v[CONF_DOOR_NUMBER] == 4:\n defaults['door4_id'] = v[CONF_DOOR_ID]\n\n schema = vol.Schema({})\n\n if 1 in doors:\n schema = schema.extend({vol.Optional('door1_id', default=defaults['door1_id']): str})\n\n if 2 in doors:\n schema = schema.extend({vol.Optional('door2_id', default=defaults['door2_id']): str})\n\n if 3 in doors:\n schema = schema.extend({vol.Optional('door3_id', default=defaults['door3_id']): str})\n\n if 4 in doors:\n schema = schema.extend({vol.Optional('door4_id', default=defaults['door4_id']): str})\n\n placeholders = {\n 'controller': f'{controller}',\n 'serial_no': f'{serial_no}',\n }\n\n return self.async_show_form(step_id=\"door\",\n data_schema=schema,\n errors=errors,\n description_placeholders=placeholders)\n\n async def async_step_cards(self, user_input: Optional[Dict[str, Any]] = None):\n\n def g(c):\n card = c[CONF_CARD_NUMBER]\n cardholder = c[CONF_CARD_NAME]\n return {\n 'label': f'{card} ({cardholder})' if cardholder and cardholder.strip() != '' else f'{card}',\n 'value': f'{card}',\n }\n\n errors: Dict[str, str] = {}\n if user_input is not None:\n if not errors:\n self.configuration['cards'] = [{\n 'card': get_card(v, self.options),\n 'configured': False,\n } for v in user_input[CONF_CARDS]]\n\n return await self.async_step_card()\n\n cards = get_all_cards(self.options)\n defaults = 
[f'{v[CONF_CARD_NUMBER]}' for v in self.options[CONF_CARDS]] if CONF_CARDS in self.options else []\n\n # if len(cards) < 2:\n # self.configuration['cards'] = [{\n # 'card': v,\n # 'configured': False,\n # } for v in cards]\n #\n # return await self.async_step_card()\n\n select = SelectSelectorConfig(options=[g(v) for v in cards],\n multiple=True,\n custom_value=False,\n mode=SelectSelectorMode.LIST) # yapf: disable\n\n schema = vol.Schema({\n vol.Required(CONF_CARDS, default=defaults): SelectSelector(select),\n })\n\n return self.async_show_form(step_id=\"cards\", data_schema=schema, errors=errors)\n\n async def async_step_card(self, user_input: Optional[Dict[str, Any]] = None):\n\n def f(v):\n return not v['configured']\n\n it = next((v for v in self.configuration['cards'] if f(v)), None)\n if it == None:\n try:\n validate_all_cards(self.options)\n return self.async_create_entry(title=\"uhppoted\", data=self.options)\n except ValueError as err:\n self.configuration['cards'] = []\n return await self.async_step_cards()\n\n else:\n card = it['card'][CONF_CARD_NUMBER]\n cardholder = it['card'][CONF_CARD_NAME]\n unique_id = it['card'][CONF_CARD_UNIQUE_ID]\n\n errors: Dict[str, str] = {}\n if user_input is not None:\n try:\n validate_card_id(user_input[CONF_CARD_NAME])\n except ValueError as err:\n errors[CONF_CARD_NAME] = f'{err}'\n\n if not errors:\n v = self.options[CONF_CARDS] if CONF_CARDS in self.options else []\n\n for c in v:\n if int(f'{c[CONF_CARD_NUMBER]}') == int(f'{card}'):\n c[CONF_CARD_NAME] = user_input[CONF_CARD_NAME]\n break\n else:\n v.append({\n CONF_CARD_NUMBER: card,\n CONF_CARD_NAME: user_input[CONF_CARD_NAME],\n CONF_CARD_UNIQUE_ID: unique_id,\n })\n\n self.options.update({CONF_CARDS: v})\n it['configured'] = True\n\n return await self.async_step_card()\n\n defaults = {\n CONF_CARD_NAME: f'{cardholder}',\n }\n\n if user_input is not None:\n for v in [CONF_CARD_NAME]:\n if k in user_input:\n defaults[k] = user_input[k]\n\n schema = vol.Schema({\n vol.Required(CONF_CARD_NAME, default=defaults[CONF_CARD_NAME]): str,\n })\n\n placeholders = {\n 'card': f'{card}',\n 'cardholder': f'{cardholder}',\n }\n\n return self.async_show_form(step_id=\"card\",\n data_schema=schema,\n errors=errors,\n description_placeholders=placeholders)" }, { "identifier": "validate_controller_id", "path": "custom_components/uhppoted/config.py", "snippet": "def validate_controller_id(serial_no, name, options):\n if not name or name.strip() == '':\n raise ValueError(ERR_INVALID_CONTROLLER_ID)\n\n if options and CONF_CONTROLLERS in options:\n for v in options[CONF_CONTROLLERS]:\n if normalise(v[CONF_CONTROLLER_ID]) == normalise(name):\n if int(f'{v[CONF_CONTROLLER_SERIAL_NUMBER]}') != int(f'{serial_no}'):\n raise ValueError(ERR_DUPLICATE_CONTROLLER_ID)" }, { "identifier": "validate_door_id", "path": "custom_components/uhppoted/config.py", "snippet": "def validate_door_id(name, options):\n if not name or name.strip() == '':\n raise ValueError(ERR_INVALID_DOOR_ID)\n\n if name.strip() != '-' and options and CONF_DOORS in options:\n for v in options[CONF_DOORS]:\n if normalise(v[CONF_DOOR_ID]) == normalise(name):\n raise ValueError(ERR_DUPLICATE_DOOR_ID)" }, { "identifier": "validate_door_duplicates", "path": "custom_components/uhppoted/config.py", "snippet": "def validate_door_duplicates(name, doors):\n normalised = [normalise(v) for v in doors]\n normalised = [v for v in normalised if v != '']\n\n if normalised.count(normalise(name)) > 1:\n raise ValueError(ERR_DUPLICATE_DOOR_ID)" }, { "identifier": 
"validate_card_id", "path": "custom_components/uhppoted/config.py", "snippet": "def validate_card_id(name):\n if not name or name.strip() == '':\n raise ValueError(ERR_INVALID_CARD_ID)" }, { "identifier": "validate_all_cards", "path": "custom_components/uhppoted/config.py", "snippet": "def validate_all_cards(options):\n pass" }, { "identifier": "get_IPv4", "path": "custom_components/uhppoted/config.py", "snippet": "def get_IPv4(defaults):\n bind = '0.0.0.0'\n broadcast = '255.255.255.255:60000'\n listen = '0.0.0.0:60001'\n debug = False\n\n if CONF_BIND_ADDR in defaults:\n bind = defaults[CONF_BIND_ADDR]\n\n if CONF_BROADCAST_ADDR in defaults:\n broadcast = defaults[CONF_BROADCAST_ADDR]\n\n if CONF_LISTEN_ADDR in defaults:\n listen = defaults[CONF_LISTEN_ADDR]\n\n if CONF_DEBUG in defaults:\n debug = defaults[CONF_DEBUG]\n\n return {\n CONF_BIND_ADDR: bind,\n CONF_BROADCAST_ADDR: broadcast,\n CONF_LISTEN_ADDR: listen,\n CONF_DEBUG: debug,\n }" }, { "identifier": "get_all_controllers", "path": "custom_components/uhppoted/config.py", "snippet": "def get_all_controllers(options):\n controllers = set()\n if CONF_CONTROLLERS in options:\n for v in options[CONF_CONTROLLERS]:\n controllers.add(int(f'{v[CONF_CONTROLLER_SERIAL_NUMBER]}'))\n\n try:\n bind = options[CONF_BIND_ADDR]\n broadcast = options[CONF_BROADCAST_ADDR]\n listen = options[CONF_LISTEN_ADDR]\n debug = options[CONF_DEBUG]\n u = uhppote.Uhppote(bind, broadcast, listen, debug)\n\n response = u.get_all_controllers()\n\n for v in response:\n controllers.add(v.controller)\n\n except Exception as e:\n _LOGGER.exception(f'error retrieving list of controllers ({e})')\n\n return sorted(list(controllers), reverse=True)" }, { "identifier": "get_all_cards", "path": "custom_components/uhppoted/config.py", "snippet": "def get_all_cards(options):\n cards = dict()\n\n # ... get controller cards\n bind = options[CONF_BIND_ADDR]\n broadcast = options[CONF_BROADCAST_ADDR]\n listen = options[CONF_LISTEN_ADDR]\n debug = options[CONF_DEBUG]\n u = uhppote.Uhppote(bind, broadcast, listen, debug)\n\n controllers = options[CONF_CONTROLLERS]\n\n for c in controllers:\n controller = int(f'{c[CONF_CONTROLLER_SERIAL_NUMBER]}'.strip())\n\n try:\n response = u.get_cards(controller)\n _LOGGER.info(f'{controller}: {response.cards} cards')\n\n N = min(response.cards, MAX_CARDS)\n ix = 1\n count = 0\n errors = 0\n\n while count < N and ix < MAX_CARD_INDEX and len(cards) < MAX_CARDS and errors < MAX_ERRORS:\n try:\n response = u.get_card_by_index(controller, ix)\n cards[response.card_number] = {\n CONF_CARD_NUMBER: response.card_number,\n CONF_CARD_UNIQUE_ID: uuid.uuid4(),\n CONF_CARD_NAME: None,\n CONF_CARD_STARTDATE: None,\n CONF_CARD_ENDDATE: None,\n CONF_CARD_DOORS: [],\n }\n count += 1\n ix += 1\n except Exception as e:\n errors += 1\n _LOGGER.warning(f'{controller} error retrieving card at index {ix} ({e})')\n\n except Exception as e:\n _LOGGER.warning(f'{controller} error retrieving list of cards ({e})')\n\n # ... add cards from options\n if options and CONF_CARDS in options:\n for v in options[CONF_CARDS]:\n k = int(f'{v[CONF_CARD_NUMBER]}')\n cards[k] = v\n\n # ... 
convert cards list to records\n\n return [cards[k] for k in sorted(cards.keys())]" }, { "identifier": "default_card_start_date", "path": "custom_components/uhppoted/config.py", "snippet": "def default_card_start_date():\n return datetime.date.today()" }, { "identifier": "default_card_end_date", "path": "custom_components/uhppoted/config.py", "snippet": "def default_card_end_date():\n today = datetime.date.today()\n end_date = today + datetime.timedelta(days=180)\n year = end_date.year\n month = end_date.month\n day = calendar.monthrange(end_date.year, end_date.month)[1]\n\n return datetime.date(year, month, day)" } ]
import logging
import re
import uuid

import voluptuous as vol

from typing import Any
from typing import Dict
from typing import Optional

from homeassistant.core import HomeAssistant
from homeassistant.core import callback
from homeassistant.config_entries import ConfigFlow
from homeassistant.config_entries import OptionsFlow
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers import selector
from homeassistant.helpers.selector import SelectSelector
from homeassistant.helpers.selector import SelectSelectorConfig
from homeassistant.helpers.selector import SelectSelectorMode
from homeassistant.helpers import config_validation as cv

from uhppoted import uhppote

from .const import DOMAIN
from .const import CONF_BIND_ADDR
from .const import CONF_BROADCAST_ADDR
from .const import CONF_LISTEN_ADDR
from .const import CONF_DEBUG
from .const import CONF_CONTROLLERS
from .const import CONF_CONTROLLER_ID
from .const import CONF_CONTROLLER_SERIAL_NUMBER
from .const import CONF_CONTROLLER_ADDR
from .const import CONF_CONTROLLER_TIMEZONE
from .const import CONF_DOORS
from .const import CONF_DOOR_ID
from .const import CONF_DOOR_CONTROLLER
from .const import CONF_DOOR_NUMBER
from .const import CONF_CARDS
from .const import CONF_CARD_UNIQUE_ID
from .const import CONF_CARD_NUMBER
from .const import CONF_CARD_NAME
from .const import CONF_CARD_STARTDATE
from .const import CONF_CARD_ENDDATE
from .const import CONF_CARD_DOORS
from .const import DEFAULT_CONTROLLER_ID
from .const import DEFAULT_CONTROLLER_ADDR
from .const import DEFAULT_CONTROLLER_TIMEZONE
from .const import DEFAULT_DOOR1
from .const import DEFAULT_DOOR2
from .const import DEFAULT_DOOR3
from .const import DEFAULT_DOOR4
from .options_flow import UhppotedOptionsFlow
from .config import validate_controller_id
from .config import validate_door_id
from .config import validate_door_duplicates
from .config import validate_card_id
from .config import validate_all_cards
from .config import get_IPv4
from .config import get_all_controllers
from .config import get_all_cards
from .config import default_card_start_date
from .config import default_card_end_date
8,113
v.append({ CONF_CONTROLLER_ID: name, CONF_CONTROLLER_SERIAL_NUMBER: serial_no, CONF_CONTROLLER_ADDR: address, CONF_CONTROLLER_TIMEZONE: timezone, }) self.options.update({CONF_CONTROLLERS: v}) controller['name'] = user_input[CONF_CONTROLLER_ID] controller['configured'] = True return await self.async_step_controller() defaults = { CONF_CONTROLLER_ID: DEFAULT_CONTROLLER_ID, CONF_CONTROLLER_ADDR: DEFAULT_CONTROLLER_ADDR, CONF_CONTROLLER_TIMEZONE: DEFAULT_CONTROLLER_TIMEZONE, } if user_input is not None: for k in [CONF_CONTROLLER_ID, CONF_CONTROLLER_ADDR, CONF_CONTROLLER_TIMEZONE]: if k in user_input: defaults[k] = user_input[k] schema = vol.Schema({ vol.Required(CONF_CONTROLLER_ID, default=defaults[CONF_CONTROLLER_ID]): str, vol.Optional(CONF_CONTROLLER_ADDR, default=defaults[CONF_CONTROLLER_ADDR]): str, vol.Optional(CONF_CONTROLLER_TIMEZONE, default=defaults[CONF_CONTROLLER_TIMEZONE]): str, }) return self.async_show_form(step_id="controller", data_schema=schema, errors=errors, description_placeholders={ "serial_no": controller['serial_no'], }) async def async_step_doors(self, user_input: Optional[Dict[str, Any]] = None): it = next((v for v in self.controllers if not v['doors']), None) if it == None: return await self.async_step_door() else: controller = it['controller'] errors: Dict[str, str] = {} if user_input is not None: if not errors: it['doors'] = { 'doors': [int(f'{v}') for v in user_input['doors']], 'configured': False, } return await self.async_step_doors() doors = [] if re.match('^[1234].*', f"{controller['serial_no']}"): doors.append(1) if re.match('^[234].*', f"{controller['serial_no']}"): doors.append(2) if re.match('^[34].*', f"{controller['serial_no']}"): doors.append(3) if re.match('^[4].*', f"{controller['serial_no']}"): doors.append(4) select = selector.SelectSelectorConfig(options=[{ 'label': f'Door {v}', 'value': f'{v}' } for v in doors], multiple=True, custom_value=False, mode=selector.SelectSelectorMode.LIST) # yapf: disable schema = vol.Schema({ vol.Required('doors', default=[f'{v}' for v in doors]): selector.SelectSelector(select), }) placeholders = { 'controller': f'{controller["name"]}', 'serial_no': f'{controller["serial_no"]}', } return self.async_show_form(step_id="doors", data_schema=schema, errors=errors, description_placeholders=placeholders) async def async_step_door(self, user_input: Optional[Dict[str, Any]] = None): def f(v): return len(v['doors']) > 0 and not v['configured'] it = next((v for v in self.controllers if f(v['doors'])), None) if it == None: return await self.async_step_cards() else: controller = it['controller']['name'] serial_no = it['controller']['serial_no'] doors = it['doors']['doors'] errors: Dict[str, str] = {} if user_input is not None: l = [user_input[f'door{v}_id'] for v in doors] for d in doors: try: k = f'door{d}_id' v = user_input[k] validate_door_id(v, self.options) validate_door_duplicates(v, l) except ValueError as err: errors[k] = f'{err}' if not errors: v = self.options[CONF_DOORS] for d in doors: v.append({ CONF_DOOR_ID: user_input[f'door{d}_id'], CONF_DOOR_CONTROLLER: controller,
_LOGGER = logging.getLogger(__name__) class UhppotedConfigFlow(ConfigFlow, domain=DOMAIN): @staticmethod @callback def async_get_options_flow(config_entry: ConfigEntry) -> UhppotedOptionsFlow: return UhppotedOptionsFlow(config_entry) async def async_step_user(self, user_input: Optional[Dict[str, Any]] = None): defaults = self.hass.data[DOMAIN] if DOMAIN in self.hass.data else {} self.data = {} self.options = {} self.controllers = [] self.doors = [] self.configuration = {'cards': []} self.options.update(get_IPv4(defaults)) self.options.update({ CONF_CONTROLLERS: [], CONF_DOORS: [], }) return await self.async_step_IPv4() async def async_step_IPv4(self, user_input: Optional[Dict[str, Any]] = None): errors: Dict[str, str] = {} if user_input is not None: if not errors: self.options.update(user_input) return await self.async_step_controllers() bind = self.options[CONF_BIND_ADDR] broadcast = self.options[CONF_BROADCAST_ADDR] listen = self.options[CONF_LISTEN_ADDR] debug = self.options[CONF_DEBUG] schema = vol.Schema({ vol.Optional(CONF_BIND_ADDR, default=bind): str, vol.Optional(CONF_BROADCAST_ADDR, default=broadcast): str, vol.Optional(CONF_LISTEN_ADDR, default=listen): str, vol.Optional(CONF_DEBUG, default=debug): bool, }) return self.async_show_form(step_id="IPv4", data_schema=schema, errors=errors) async def async_step_controllers(self, user_input: Optional[Dict[str, Any]] = None): errors: Dict[str, str] = {} if user_input is not None: if not errors: for v in user_input[CONF_CONTROLLERS]: self.controllers.append({ 'controller': { 'name': '', 'serial_no': v, 'configured': False, }, 'doors': None, }) return await self.async_step_controller() controllers = get_all_controllers(self.options) if len(controllers) < 2: for v in controllers: self.controllers.append({ 'controller': { 'name': '', 'serial_no': v, 'configured': False, }, 'doors': None, }) return await self.async_step_controller() schema = vol.Schema({ vol.Required(CONF_CONTROLLERS, default=[f'{v}' for v in controllers]): SelectSelector( SelectSelectorConfig(options=[f'{v}' for v in controllers], multiple=True, custom_value=False, mode=SelectSelectorMode.LIST)), }) return self.async_show_form(step_id="controllers", data_schema=schema, errors=errors) async def async_step_controller(self, user_input: Optional[Dict[str, Any]] = None): it = next((v for v in self.controllers if not v['controller']['configured']), None) if it == None: return await self.async_step_doors() else: controller = it['controller'] errors: Dict[str, str] = {} if user_input is not None: name = user_input[CONF_CONTROLLER_ID] serial_no = controller['serial_no'] address = user_input[CONF_CONTROLLER_ADDR] timezone = user_input[CONF_CONTROLLER_TIMEZONE] try: validate_controller_id(serial_no, name, self.options) except ValueError as err: errors[CONF_CONTROLLER_ID] = f'{err}' if not errors: v = self.options[CONF_CONTROLLERS] v.append({ CONF_CONTROLLER_ID: name, CONF_CONTROLLER_SERIAL_NUMBER: serial_no, CONF_CONTROLLER_ADDR: address, CONF_CONTROLLER_TIMEZONE: timezone, }) self.options.update({CONF_CONTROLLERS: v}) controller['name'] = user_input[CONF_CONTROLLER_ID] controller['configured'] = True return await self.async_step_controller() defaults = { CONF_CONTROLLER_ID: DEFAULT_CONTROLLER_ID, CONF_CONTROLLER_ADDR: DEFAULT_CONTROLLER_ADDR, CONF_CONTROLLER_TIMEZONE: DEFAULT_CONTROLLER_TIMEZONE, } if user_input is not None: for k in [CONF_CONTROLLER_ID, CONF_CONTROLLER_ADDR, CONF_CONTROLLER_TIMEZONE]: if k in user_input: defaults[k] = user_input[k] schema = vol.Schema({ 
vol.Required(CONF_CONTROLLER_ID, default=defaults[CONF_CONTROLLER_ID]): str, vol.Optional(CONF_CONTROLLER_ADDR, default=defaults[CONF_CONTROLLER_ADDR]): str, vol.Optional(CONF_CONTROLLER_TIMEZONE, default=defaults[CONF_CONTROLLER_TIMEZONE]): str, }) return self.async_show_form(step_id="controller", data_schema=schema, errors=errors, description_placeholders={ "serial_no": controller['serial_no'], }) async def async_step_doors(self, user_input: Optional[Dict[str, Any]] = None): it = next((v for v in self.controllers if not v['doors']), None) if it == None: return await self.async_step_door() else: controller = it['controller'] errors: Dict[str, str] = {} if user_input is not None: if not errors: it['doors'] = { 'doors': [int(f'{v}') for v in user_input['doors']], 'configured': False, } return await self.async_step_doors() doors = [] if re.match('^[1234].*', f"{controller['serial_no']}"): doors.append(1) if re.match('^[234].*', f"{controller['serial_no']}"): doors.append(2) if re.match('^[34].*', f"{controller['serial_no']}"): doors.append(3) if re.match('^[4].*', f"{controller['serial_no']}"): doors.append(4) select = selector.SelectSelectorConfig(options=[{ 'label': f'Door {v}', 'value': f'{v}' } for v in doors], multiple=True, custom_value=False, mode=selector.SelectSelectorMode.LIST) # yapf: disable schema = vol.Schema({ vol.Required('doors', default=[f'{v}' for v in doors]): selector.SelectSelector(select), }) placeholders = { 'controller': f'{controller["name"]}', 'serial_no': f'{controller["serial_no"]}', } return self.async_show_form(step_id="doors", data_schema=schema, errors=errors, description_placeholders=placeholders) async def async_step_door(self, user_input: Optional[Dict[str, Any]] = None): def f(v): return len(v['doors']) > 0 and not v['configured'] it = next((v for v in self.controllers if f(v['doors'])), None) if it == None: return await self.async_step_cards() else: controller = it['controller']['name'] serial_no = it['controller']['serial_no'] doors = it['doors']['doors'] errors: Dict[str, str] = {} if user_input is not None: l = [user_input[f'door{v}_id'] for v in doors] for d in doors: try: k = f'door{d}_id' v = user_input[k] validate_door_id(v, self.options) validate_door_duplicates(v, l) except ValueError as err: errors[k] = f'{err}' if not errors: v = self.options[CONF_DOORS] for d in doors: v.append({ CONF_DOOR_ID: user_input[f'door{d}_id'], CONF_DOOR_CONTROLLER: controller,
CONF_DOOR_NUMBER: int(f'{d}'),
13
2023-11-06 18:46:49+00:00
12k
shadowpa0327/FLORA
main.py
[ { "identifier": "AverageMeter", "path": "my_meter.py", "snippet": "class AverageMeter:\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self._world_size = dist.get_world_size()\n self.reset()\n\n def reset(self):\n # local\n self._val = 0\n self._sum = 0\n self._count = 0\n # global\n self._history_avg = 0\n self._history_count = 0\n self._avg = None\n\n def update(self, val, n=1):\n self._val = val\n self._sum += val * n\n self._count += n\n self._avg = None\n\n @property\n def val(self):\n return self._val\n\n @property\n def count(self):\n return self._count + self._history_count\n\n @property\n def avg(self):\n if self._avg is None:\n # compute avg\n r = self._history_count / max(1, self._history_count + self._count)\n _avg = self._sum / max(1, self._count)\n self._avg = r * self._history_avg + (1.0 - r) * _avg\n return self._avg\n\n def sync(self):\n buf = torch.tensor([self._sum, self._count],\n dtype=torch.float32).cuda()\n buf = reduce_tensor(buf, 1)\n _sum, _count = buf.tolist()\n _avg = _sum / max(1, _count)\n r = self._history_count / max(1, self._history_count + _count)\n\n self._history_avg = r * self._history_avg + (1.0 - r) * _avg\n self._history_count += _count\n\n self._sum = 0\n self._count = 0\n\n self._avg = None" }, { "identifier": "get_config", "path": "config.py", "snippet": "def get_config(args):\n \"\"\"Get a yacs CfgNode object with default values.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n config = _C.clone()\n update_config(config, args)\n\n return config" }, { "identifier": "build_model", "path": "models/build.py", "snippet": "def build_model(config):\n model_type = config.MODEL.TYPE\n if model_type == 'swin':\n model = SwinTransformer(\n img_size=config.DATA.IMG_SIZE,\n patch_size=config.MODEL.SWIN.PATCH_SIZE,\n in_chans=config.MODEL.SWIN.IN_CHANS,\n num_classes=config.MODEL.NUM_CLASSES,\n embed_dim=config.MODEL.SWIN.EMBED_DIM,\n depths=config.MODEL.SWIN.DEPTHS,\n num_heads=config.MODEL.SWIN.NUM_HEADS,\n window_size=config.MODEL.SWIN.WINDOW_SIZE,\n mlp_ratio=config.MODEL.SWIN.MLP_RATIO,\n qkv_bias=config.MODEL.SWIN.QKV_BIAS,\n qk_scale=config.MODEL.SWIN.QK_SCALE,\n drop_rate=config.MODEL.DROP_RATE,\n drop_path_rate=config.MODEL.DROP_PATH_RATE,\n ape=config.MODEL.SWIN.APE,\n patch_norm=config.MODEL.SWIN.PATCH_NORM,\n use_checkpoint=config.TRAIN.USE_CHECKPOINT,\n fused_window_process=config.FUSED_WINDOW_PROCESS\n )\n elif model_type == 'deit':\n model = VisionTransformer(\n img_size=config.DATA.IMG_SIZE,\n patch_size=config.MODEL.DEIT.PATCH_SIZE,\n in_chans=config.MODEL.DEIT.IN_CHANS,\n num_classes=config.MODEL.NUM_CLASSES,\n embed_dim=config.MODEL.DEIT.EMBED_DIM,\n depth=config.MODEL.DEIT.DEPTH,\n num_heads = config.MODEL.DEIT.NUM_HEADS,\n mlp_ratio = config.MODEL.DEIT.MLP_RATIO,\n qkv_bias = config.MODEL.DEIT.QKV_BIAS,\n drop_rate=config.MODEL.DROP_RATE,\n drop_path_rate=config.MODEL.DROP_PATH_RATE,\n )\n elif model_type == 'lr_swin':\n model = LRSwinTransformer(\n img_size=config.DATA.IMG_SIZE,\n patch_size=config.MODEL.SWIN.PATCH_SIZE,\n in_chans=config.MODEL.SWIN.IN_CHANS,\n num_classes=config.MODEL.NUM_CLASSES,\n embed_dim=config.MODEL.SWIN.EMBED_DIM,\n depths=config.MODEL.SWIN.DEPTHS,\n num_heads=config.MODEL.SWIN.NUM_HEADS,\n window_size=config.MODEL.SWIN.WINDOW_SIZE,\n mlp_ratio=config.MODEL.SWIN.MLP_RATIO,\n qkv_bias=config.MODEL.SWIN.QKV_BIAS,\n qk_scale=config.MODEL.SWIN.QK_SCALE,\n drop_rate=config.MODEL.DROP_RATE,\n 
drop_path_rate=config.MODEL.DROP_PATH_RATE,\n ape=config.MODEL.SWIN.APE,\n patch_norm=config.MODEL.SWIN.PATCH_NORM,\n use_checkpoint=config.TRAIN.USE_CHECKPOINT,\n fused_window_process=config.FUSED_WINDOW_PROCESS\n )\n elif model_type == 'lr_swin_subnet':\n model = LRSwinTransformerSubnet(\n svd_config=config.MODEL.SWIN.SVD_CONFIG,\n img_size=config.DATA.IMG_SIZE,\n patch_size=config.MODEL.SWIN.PATCH_SIZE,\n in_chans=config.MODEL.SWIN.IN_CHANS,\n num_classes=config.MODEL.NUM_CLASSES,\n embed_dim=config.MODEL.SWIN.EMBED_DIM,\n depths=config.MODEL.SWIN.DEPTHS,\n num_heads=config.MODEL.SWIN.NUM_HEADS,\n window_size=config.MODEL.SWIN.WINDOW_SIZE,\n mlp_ratio=config.MODEL.SWIN.MLP_RATIO,\n qkv_bias=config.MODEL.SWIN.QKV_BIAS,\n qk_scale=config.MODEL.SWIN.QK_SCALE,\n drop_rate=config.MODEL.DROP_RATE,\n drop_path_rate=config.MODEL.DROP_PATH_RATE,\n ape=config.MODEL.SWIN.APE,\n patch_norm=config.MODEL.SWIN.PATCH_NORM,\n use_checkpoint=config.TRAIN.USE_CHECKPOINT,\n fused_window_process=config.FUSED_WINDOW_PROCESS\n )\n elif model_type == 'lr_deit':\n model = LRVisionTransformer(\n img_size=config.DATA.IMG_SIZE,\n patch_size=config.MODEL.DEIT.PATCH_SIZE,\n in_chans=config.MODEL.DEIT.IN_CHANS,\n num_classes=config.MODEL.NUM_CLASSES,\n embed_dim=config.MODEL.DEIT.EMBED_DIM,\n depth=config.MODEL.DEIT.DEPTH,\n num_heads = config.MODEL.DEIT.NUM_HEADS,\n mlp_ratio = config.MODEL.DEIT.MLP_RATIO,\n qkv_bias = config.MODEL.DEIT.QKV_BIAS,\n drop_rate=config.MODEL.DROP_RATE,\n drop_path_rate=config.MODEL.DROP_PATH_RATE,\n fused_lr=config.MODEL.DEIT.FUSE_LR,\n )\n elif model_type == 'lr_deit_subnet':\n model = LRVisionTransformerSubnet(\n svd_config = config.MODEL.DEIT.SVD_CONFIG,\n img_size=config.DATA.IMG_SIZE,\n patch_size=config.MODEL.DEIT.PATCH_SIZE,\n in_chans=config.MODEL.DEIT.IN_CHANS,\n num_classes=config.MODEL.NUM_CLASSES,\n embed_dim=config.MODEL.DEIT.EMBED_DIM,\n depth=config.MODEL.DEIT.DEPTH,\n num_heads = config.MODEL.DEIT.NUM_HEADS,\n mlp_ratio = config.MODEL.DEIT.MLP_RATIO,\n qkv_bias = config.MODEL.DEIT.QKV_BIAS,\n drop_rate=config.MODEL.DROP_RATE,\n drop_path_rate=config.MODEL.DROP_PATH_RATE,\n )\n else:\n raise NotImplementedError(f\"Unkown model: {model_type}\")\n\n return model" }, { "identifier": "build_loader", "path": "data/build.py", "snippet": "def build_loader(config):\n config.defrost()\n dataset_train, config.MODEL.NUM_CLASSES = build_dataset(\n is_train=True, config=config)\n config.freeze()\n\n print(\n f\"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build train dataset\")\n dataset_val, _ = build_dataset(is_train=False, config=config)\n print(\n f\"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build val dataset\")\n\n mixup_active = config.AUG.MIXUP > 0 or config.AUG.CUTMIX > 0. 
or config.AUG.CUTMIX_MINMAX is not None\n\n sampler_train = MyDistributedSampler(\n dataset_train, shuffle=True,\n drop_last=False, padding=True, pair=mixup_active and config.DISTILL.ENABLED,\n )\n\n sampler_val = MyDistributedSampler(\n dataset_val, shuffle=False,\n drop_last=False, padding=False, pair=False,\n )\n\n # TinyViT Dataset Wrapper\n if config.DISTILL.ENABLED:\n dataset_train = DatasetWrapper(dataset_train,\n logits_path=config.DISTILL.TEACHER_LOGITS_PATH,\n topk=config.DISTILL.LOGITS_TOPK,\n write=config.DISTILL.SAVE_TEACHER_LOGITS,\n )\n\n data_loader_train = torch.utils.data.DataLoader(\n dataset_train, sampler=sampler_train,\n batch_size=config.DATA.BATCH_SIZE,\n num_workers=config.DATA.NUM_WORKERS,\n pin_memory=config.DATA.PIN_MEMORY,\n # modified for TinyViT, we save logits of all samples\n drop_last=not config.DISTILL.SAVE_TEACHER_LOGITS,\n )\n\n data_loader_val = torch.utils.data.DataLoader(\n dataset_val, sampler=sampler_val,\n batch_size=int(config.DATA.BATCH_SIZE*1.5),\n shuffle=False,\n num_workers=config.DATA.NUM_WORKERS,\n pin_memory=config.DATA.PIN_MEMORY,\n drop_last=False\n )\n\n # setup mixup / cutmix\n mixup_fn = None\n if mixup_active:\n mixup_t = Mixup if not config.DISTILL.ENABLED else Mixup_record\n if config.DISTILL.ENABLED and config.AUG.MIXUP_MODE != \"pair2\":\n # change to pair2 mode for saving logits\n config.defrost()\n config.AUG.MIXUP_MODE = 'pair2'\n config.freeze()\n mixup_fn = mixup_t(\n mixup_alpha=config.AUG.MIXUP, cutmix_alpha=config.AUG.CUTMIX, cutmix_minmax=config.AUG.CUTMIX_MINMAX,\n prob=config.AUG.MIXUP_PROB, switch_prob=config.AUG.MIXUP_SWITCH_PROB, mode=config.AUG.MIXUP_MODE,\n label_smoothing=config.MODEL.LABEL_SMOOTHING, num_classes=config.MODEL.NUM_CLASSES)\n\n return dataset_train, dataset_val, data_loader_train, data_loader_val, mixup_fn" }, { "identifier": "build_scheduler", "path": "lr_scheduler.py", "snippet": "def build_scheduler(config, optimizer, n_iter_per_epoch):\n num_steps = int(config.TRAIN.EPOCHS * n_iter_per_epoch)\n warmup_steps = int(config.TRAIN.WARMUP_EPOCHS * n_iter_per_epoch)\n decay_steps = int(\n config.TRAIN.LR_SCHEDULER.DECAY_EPOCHS * n_iter_per_epoch)\n\n lr_scheduler = None\n if config.TRAIN.LR_SCHEDULER.NAME == 'cosine':\n lr_scheduler = CosineLRScheduler(\n optimizer,\n t_initial=num_steps,\n lr_min=config.TRAIN.MIN_LR,\n warmup_lr_init=config.TRAIN.WARMUP_LR,\n warmup_t=warmup_steps,\n cycle_limit=1,\n t_in_epochs=False,\n )\n elif config.TRAIN.LR_SCHEDULER.NAME == 'linear':\n lr_scheduler = LinearLRScheduler(\n optimizer,\n t_initial=num_steps,\n lr_min_rate=0.01,\n warmup_lr_init=config.TRAIN.WARMUP_LR,\n warmup_t=warmup_steps,\n t_in_epochs=False,\n )\n elif config.TRAIN.LR_SCHEDULER.NAME == 'step':\n lr_scheduler = StepLRScheduler(\n optimizer,\n decay_t=decay_steps,\n decay_rate=config.TRAIN.LR_SCHEDULER.DECAY_RATE,\n warmup_lr_init=config.TRAIN.WARMUP_LR,\n warmup_t=warmup_steps,\n t_in_epochs=False,\n )\n return lr_scheduler" }, { "identifier": "build_optimizer", "path": "optimizer.py", "snippet": "def build_optimizer(config, model):\n \"\"\"\n Build optimizer, set weight decay of normalization to 0 by default.\n \"\"\"\n skip = {}\n skip_keywords = {}\n if hasattr(model, 'no_weight_decay'):\n skip = model.no_weight_decay()\n if hasattr(model, 'no_weight_decay_keywords'):\n skip_keywords = model.no_weight_decay_keywords()\n parameters = set_weight_decay(model, skip, skip_keywords)\n\n opt_lower = config.TRAIN.OPTIMIZER.NAME.lower()\n optimizer = None\n if opt_lower == 'sgd':\n optimizer = 
optim.SGD(parameters, momentum=config.TRAIN.OPTIMIZER.MOMENTUM, nesterov=True,\n lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)\n elif opt_lower == 'adamw':\n optimizer = optim.AdamW(parameters, eps=config.TRAIN.OPTIMIZER.EPS, betas=config.TRAIN.OPTIMIZER.BETAS,\n lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)\n\n return optimizer" }, { "identifier": "create_logger", "path": "logger.py", "snippet": "@functools.lru_cache()\ndef create_logger(output_dir, dist_rank=0, name=''):\n # create logger\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n logger.propagate = False\n\n # create formatter\n fmt = '[%(asctime)s %(name)s] (%(filename)s %(lineno)d): %(levelname)s %(message)s'\n color_fmt = colored('[%(asctime)s %(name)s]', 'green') + \\\n colored('(%(filename)s %(lineno)d)', 'yellow') + \\\n ': %(levelname)s %(message)s'\n\n # create console handlers for master process\n if dist_rank == 0:\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(\n logging.Formatter(fmt=color_fmt, datefmt='%Y-%m-%d %H:%M:%S'))\n logger.addHandler(console_handler)\n\n # create file handlers\n file_handler = logging.FileHandler(os.path.join(\n output_dir, f'log_rank{dist_rank}.txt'), mode='a')\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(logging.Formatter(\n fmt=fmt, datefmt='%Y-%m-%d %H:%M:%S'))\n logger.addHandler(file_handler)\n\n return logger" }, { "identifier": "load_checkpoint", "path": "utils.py", "snippet": "def load_checkpoint(config, model, optimizer, lr_scheduler, loss_scaler, logger, search_space = None):\n logger.info(\n f\"==============> Resuming form {config.MODEL.RESUME}....................\")\n if config.MODEL.RESUME.startswith('https'):\n checkpoint = torch.hub.load_state_dict_from_url(\n config.MODEL.RESUME, map_location='cpu', check_hash=True)\n else:\n checkpoint = torch.load(config.MODEL.RESUME, map_location='cpu')\n\n params = checkpoint['model']\n now_model_state = model.state_dict()\n mnames = ['head.weight', 'head.bias'] # (cls, 1024), (cls, )\n if mnames[-1] in params:\n ckpt_head_bias = params[mnames[-1]]\n now_model_bias = now_model_state[mnames[-1]]\n if ckpt_head_bias.shape != now_model_bias.shape:\n num_classes = 1000\n\n if len(ckpt_head_bias) == 21841 and len(now_model_bias) == num_classes:\n logger.info(\"Convert checkpoint from 21841 to 1k\")\n # convert 22kto1k\n fname = './imagenet_1kto22k.txt'\n with open(fname) as fin:\n mapping = torch.Tensor(\n list(map(int, fin.readlines()))).to(torch.long)\n for name in mnames:\n v = params[name]\n shape = list(v.shape)\n shape[0] = num_classes\n mean_v = v[mapping[mapping != -1]].mean(0, keepdim=True)\n v = torch.cat([v, mean_v], 0)\n v = v[mapping]\n params[name] = v\n\n msg = model.load_state_dict(params, strict=False)\n logger.info(msg)\n max_accuracy = 0.0\n if not config.EVAL_MODE:\n if 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint:\n if optimizer is not None:\n optimizer.load_state_dict(checkpoint['optimizer'])\n if lr_scheduler is not None:\n lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n if 'scaler' in checkpoint:\n loss_scaler.load_state_dict(checkpoint['scaler'])\n logger.info(\n f\"=> loaded successfully '{config.MODEL.RESUME}' (epoch {checkpoint['epoch']})\")\n if 'max_accuracy' in checkpoint:\n max_accuracy = checkpoint['max_accuracy']\n \n if 'search_space' in checkpoint and search_space is not None:\n 
search_space.load_state_dict(checkpoint['search_space'])\n logger.info(\n f\"=> Found existing search space: {search_space})\")\n logger.info(\n f\"=> loaded search space successfully\")\n \n if 'epoch' in checkpoint:\n config.defrost()\n config.TRAIN.START_EPOCH = checkpoint['epoch'] + 1\n config.freeze()\n\n del checkpoint\n torch.cuda.empty_cache()\n return max_accuracy" }, { "identifier": "load_pretrained", "path": "utils.py", "snippet": "def load_pretrained(config, model, logger):\n logger.info(\n f\"==============> Loading weight {config.MODEL.PRETRAINED} for fine-tuning......\")\n checkpoint = torch.load(config.MODEL.PRETRAINED, map_location='cpu')\n state_dict = checkpoint['model']\n\n # delete relative_position_index since we always re-init it\n relative_position_index_keys = [\n k for k in state_dict.keys() if \"relative_position_index\" in k]\n for k in relative_position_index_keys:\n del state_dict[k]\n\n # delete relative_coords_table since we always re-init it\n relative_position_index_keys = [\n k for k in state_dict.keys() if \"relative_coords_table\" in k]\n for k in relative_position_index_keys:\n del state_dict[k]\n\n # delete attn_mask since we always re-init it\n attn_mask_keys = [k for k in state_dict.keys() if \"attn_mask\" in k]\n for k in attn_mask_keys:\n del state_dict[k]\n\n model_state_dict = model.state_dict()\n\n # bicubic interpolate relative_position_bias_table if not match\n relative_position_bias_table_keys = [\n k for k in state_dict.keys() if \"relative_position_bias_table\" in k]\n for k in relative_position_bias_table_keys:\n relative_position_bias_table_pretrained = state_dict[k]\n relative_position_bias_table_current = model_state_dict[k]\n L1, nH1 = relative_position_bias_table_pretrained.size()\n L2, nH2 = relative_position_bias_table_current.size()\n if nH1 != nH2:\n logger.warning(f\"Error in loading {k}, passing......\")\n else:\n if L1 != L2:\n # bicubic interpolate relative_position_bias_table if not match\n S1 = int(L1 ** 0.5)\n S2 = int(L2 ** 0.5)\n relative_position_bias_table_pretrained_resized = torch.nn.functional.interpolate(\n relative_position_bias_table_pretrained.permute(1, 0).view(1, nH1, S1, S1), size=(S2, S2),\n mode='bicubic')\n state_dict[k] = relative_position_bias_table_pretrained_resized.view(\n nH2, L2).permute(1, 0)\n\n # bicubic interpolate attention_biases if not match\n relative_position_bias_table_keys = [\n k for k in state_dict.keys() if \"attention_biases\" in k]\n for k in relative_position_bias_table_keys:\n relative_position_bias_table_pretrained = state_dict[k]\n relative_position_bias_table_current = model_state_dict[k]\n nH1, L1 = relative_position_bias_table_pretrained.size()\n nH2, L2 = relative_position_bias_table_current.size()\n if nH1 != nH2:\n logger.warning(f\"Error in loading {k}, passing......\")\n else:\n if L1 != L2:\n # bicubic interpolate relative_position_bias_table if not match\n S1 = int(L1 ** 0.5)\n S2 = int(L2 ** 0.5)\n relative_position_bias_table_pretrained_resized = torch.nn.functional.interpolate(\n relative_position_bias_table_pretrained.view(1, nH1, S1, S1), size=(S2, S2),\n mode='bicubic')\n state_dict[k] = relative_position_bias_table_pretrained_resized.view(\n nH2, L2)\n\n # bicubic interpolate absolute_pos_embed if not match\n absolute_pos_embed_keys = [\n k for k in state_dict.keys() if \"absolute_pos_embed\" in k]\n for k in absolute_pos_embed_keys:\n # dpe\n absolute_pos_embed_pretrained = state_dict[k]\n absolute_pos_embed_current = model.state_dict()[k]\n _, L1, C1 = 
absolute_pos_embed_pretrained.size()\n _, L2, C2 = absolute_pos_embed_current.size()\n if C1 != C1:\n logger.warning(f\"Error in loading {k}, passing......\")\n else:\n if L1 != L2:\n S1 = int(L1 ** 0.5)\n S2 = int(L2 ** 0.5)\n absolute_pos_embed_pretrained = absolute_pos_embed_pretrained.reshape(\n -1, S1, S1, C1)\n absolute_pos_embed_pretrained = absolute_pos_embed_pretrained.permute(\n 0, 3, 1, 2)\n absolute_pos_embed_pretrained_resized = torch.nn.functional.interpolate(\n absolute_pos_embed_pretrained, size=(S2, S2), mode='bicubic')\n absolute_pos_embed_pretrained_resized = absolute_pos_embed_pretrained_resized.permute(\n 0, 2, 3, 1)\n absolute_pos_embed_pretrained_resized = absolute_pos_embed_pretrained_resized.flatten(\n 1, 2)\n state_dict[k] = absolute_pos_embed_pretrained_resized\n\n # check classifier, if not match, then re-init classifier to zero\n head_bias_pretrained = state_dict['head.bias']\n Nc1 = head_bias_pretrained.shape[0]\n Nc2 = model.head.bias.shape[0]\n if (Nc1 != Nc2):\n if Nc1 == 21841 and Nc2 == 1000:\n logger.info(\"loading ImageNet-21841 weight to ImageNet-1K ......\")\n map22kto1k_path = f'./imagenet_1kto22k.txt'\n with open(map22kto1k_path) as fin:\n mapping = torch.Tensor(\n list(map(int, fin.readlines()))).to(torch.long)\n for name in ['head.weight', 'head.bias']:\n v = state_dict[name]\n mean_v = v[mapping[mapping != -1]].mean(0, keepdim=True)\n v = torch.cat([v, mean_v], 0)\n v = v[mapping]\n state_dict[name] = v\n else:\n torch.nn.init.constant_(model.head.bias, 0.)\n torch.nn.init.constant_(model.head.weight, 0.)\n del state_dict['head.weight']\n del state_dict['head.bias']\n logger.warning(\n f\"Error in loading classifier head, re-init classifier head to 0\")\n\n msg = model.load_state_dict(state_dict, strict=False)\n logger.warning(msg)\n\n logger.info(f\"=> loaded successfully '{config.MODEL.PRETRAINED}'\")\n\n del checkpoint\n torch.cuda.empty_cache()" }, { "identifier": "save_checkpoint", "path": "utils.py", "snippet": "def save_checkpoint(config, epoch, model, max_accuracy, optimizer, lr_scheduler, loss_scaler, logger, search_space = None):\n save_state = {'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'lr_scheduler': lr_scheduler.state_dict(),\n 'max_accuracy': max_accuracy,\n 'scaler': loss_scaler.state_dict(),\n 'epoch': epoch,\n 'config': config}\n if search_space:\n save_state['search_space'] = search_space.state_dict()\n save_path = os.path.join(config.OUTPUT, f'ckpt_epoch_{epoch}.pth')\n logger.info(f\"{save_path} saving......\")\n torch.save(save_state, save_path)\n logger.info(f\"{save_path} saved !!!\")" }, { "identifier": "NativeScalerWithGradNormCount", "path": "utils.py", "snippet": "class NativeScalerWithGradNormCount:\n state_dict_key = \"amp_scaler\"\n\n def __init__(self):\n self._scaler = torch.cuda.amp.GradScaler()\n\n def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):\n self._scaler.scale(loss).backward(create_graph=create_graph)\n if update_grad:\n if clip_grad is not None:\n assert parameters is not None\n # unscale the gradients of optimizer's assigned params in-place\n self._scaler.unscale_(optimizer)\n norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)\n else:\n self._scaler.unscale_(optimizer)\n norm = ampscaler_get_grad_norm(parameters)\n self._scaler.step(optimizer)\n self._scaler.update()\n else:\n norm = None\n return norm\n\n def state_dict(self):\n return self._scaler.state_dict()\n\n def load_state_dict(self, state_dict):\n 
self._scaler.load_state_dict(state_dict)" }, { "identifier": "auto_resume_helper", "path": "utils.py", "snippet": "def auto_resume_helper(output_dir):\n checkpoints = os.listdir(output_dir)\n checkpoints = [ckpt for ckpt in checkpoints if ckpt.endswith('pth')]\n print(f\"All checkpoints founded in {output_dir}: {checkpoints}\")\n if len(checkpoints) > 0:\n latest_checkpoint = max([os.path.join(output_dir, d)\n for d in checkpoints], key=os.path.getmtime)\n print(f\"The latest checkpoint founded: {latest_checkpoint}\")\n resume_file = latest_checkpoint\n else:\n resume_file = None\n return resume_file" }, { "identifier": "is_main_process", "path": "utils.py", "snippet": "def is_main_process():\n return dist.get_rank() == 0" }, { "identifier": "get_git_info", "path": "utils.py", "snippet": "def get_git_info():\n return dict(\n branch=get_cmd_output('git name-rev --name-only HEAD'),\n git_hash=get_cmd_output('git rev-parse HEAD'),\n )" }, { "identifier": "run_cmd", "path": "utils.py", "snippet": "def run_cmd(cmd):\n return subprocess.check_output(cmd.split(), universal_newlines=True).strip()" }, { "identifier": "build_low_rank_search_space", "path": "nas_utils/rank_choices_manager.py", "snippet": "def build_low_rank_search_space(args, config, force_uniform = False):\n per_block_searched_configs = None\n if config.NAS.LSSS.SEARCHED_CFG_PATH:\n import pickle\n with open(config.NAS.LSSS.SEARCHED_CFG_PATH, 'rb') as file:\n per_block_searched_configs = pickle.load(file)\n \n return LowRankSearchSpace(\n rank_choices = config.NAS.SEARCH_SPACE,\n num_blocks = config.NAS.NUM_BLOCKS,\n choices_per_blocks = config.NAS.NUM_CHOICES_PER_BLOCKS,\n is_non_uniform = False if force_uniform else config.NAS.NON_UNIFORM,\n per_block_searched_configs = per_block_searched_configs\n ) " } ]
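The NativeScalerWithGradNormCount snippet above wraps torch.cuda.amp.GradScaler: its __call__ runs backward on the scaled loss and, when update_grad is true, unscales, optionally clips, steps the optimizer, updates the scaler, and returns the gradient norm. A minimal sketch of one mixed-precision training step wired through it follows; the loop variables (samples, targets, criterion) and the clip value are assumptions for illustration, not taken from the repository.

    # Hedged sketch of a single AMP step using the scaler class shown above;
    # samples, targets, criterion and clip_grad=5.0 are assumed names/values.
    loss_scaler = NativeScalerWithGradNormCount()
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():
        outputs = model(samples)
        loss = criterion(outputs, targets)
    grad_norm = loss_scaler(loss, optimizer,
                            clip_grad=5.0,                 # assumed value
                            parameters=model.parameters(),
                            update_grad=True)              # backward, unscale, clip, step, update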
import os
import time
import random
import argparse
import datetime
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import wandb
from collections import defaultdict
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import accuracy
from my_meter import AverageMeter
from config import get_config
from models import build_model
from data import build_loader
from lr_scheduler import build_scheduler
from optimizer import build_optimizer
from logger import create_logger
from utils import load_checkpoint, load_pretrained, save_checkpoint,\
    NativeScalerWithGradNormCount,\
    auto_resume_helper, is_main_process,\
    get_git_info, run_cmd
from nas_utils import build_low_rank_search_space
7,863
"--opts", help="Modify config options by adding 'KEY VALUE' pairs. ", default=None, nargs='+', ) # easy config modification parser.add_argument('--batch-size', type=int, help="batch size for single GPU") parser.add_argument('--data-path', type=str, help='path to dataset') parser.add_argument('--pretrained', help='pretrained weight from checkpoint, could be imagenet22k pretrained weight') parser.add_argument('--resume', help='resume from checkpoint') parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps") parser.add_argument('--use-checkpoint', action='store_true', help="whether to use gradient checkpointing to save memory") parser.add_argument('--disable_amp', action='store_true', help='Disable pytorch amp') parser.add_argument('--output', default='output', type=str, metavar='PATH', help='root of output folder, the full path is <output>/<model_name>/<tag> (default: output)') parser.add_argument('--tag', help='tag of experiment') parser.add_argument('--eval', action='store_true', help='Perform evaluation only') parser.add_argument('--throughput', action='store_true', help='Test throughput only') parser.add_argument('--use-sync-bn', action='store_true', default=False, help='sync bn') parser.add_argument('--use-wandb', action='store_true', default=False, help='use wandb to record log') # distributed training parser.add_argument("--local_rank", type=int, help='local rank for DistributedDataParallel') # NAS parser.add_argument("--lsss", action='store_true', help = 'train only the local supernet', default=False) parser.add_argument("--lsss-bid", type = int, help = "block id for the target transformer blocks", default = -1) args = parser.parse_args() config = get_config(args) return args, config def main(args, config): dataset_train, dataset_val, data_loader_train, data_loader_val, mixup_fn = build_loader( config) supernet_config = config.NAS.SEARCH_SPACE smallest_config = [] for ratios in supernet_config: smallest_config.append(ratios[0]) logger.info(f"Creating model:{config.MODEL.TYPE}/{config.MODEL.NAME}") model = build_model(config) model.cuda() if args.use_sync_bn: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) logger.info(str(model)) optimizer = build_optimizer(config, model) if 'classic' in config.MODEL.TYPE: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[config.LOCAL_RANK], broadcast_buffers=False, find_unused_parameters = True) else: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[config.LOCAL_RANK], broadcast_buffers=False, find_unused_parameters = False) loss_scaler = NativeScalerWithGradNormCount() model_without_ddp = model.module low_rank_search_space = build_low_rank_search_space(args, config) if config.NAS.ENABLE: if config.NAS.INIT_CONFIG is None: cfg = low_rank_search_space.get_smallest_config() else: cfg = config.NAS.INIT_CONFIG model_without_ddp.set_sample_config(cfg) if config.NAS.LSSS.ENABLE: logger.info(f"=> Now training the local supernet of block-{config.NAS.LSSS.BLOCK_ID}") else: logger.info(f"=> Srarting supernet training !") logger.info(f"") logger.info(f"=> Set init subnet config to be {cfg}") logger.info(str(model)) n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) logger.info(f"number of params: {n_parameters}") if hasattr(model_without_ddp, 'flops'): flops = model_without_ddp.flops() logger.info(f"number of GFLOPs: {flops / 1e9}") lr_scheduler = build_scheduler(config, optimizer, len( data_loader_train) // config.TRAIN.ACCUMULATION_STEPS) if 
config.DISTILL.ENABLED: # we disable MIXUP and CUTMIX when knowledge distillation assert len( config.DISTILL.TEACHER_LOGITS_PATH) > 0, "Please fill in DISTILL.TEACHER_LOGITS_PATH" criterion = torch.nn.CrossEntropyLoss(reduction='mean') else: if config.AUG.MIXUP > 0.: # smoothing is handled with mixup label transform criterion = SoftTargetCrossEntropy() elif config.MODEL.LABEL_SMOOTHING > 0.: criterion = LabelSmoothingCrossEntropy( smoothing=config.MODEL.LABEL_SMOOTHING) else: criterion = torch.nn.CrossEntropyLoss() max_accuracy = 0.0 if config.TRAIN.AUTO_RESUME:
# -------------------------------------------------------- # Based on the code: TinyViT # (https://github.com/microsoft/Cream/tree/main/TinyViT) # Add Low Rank Supernet Training # -------------------------------------------------------- try: except ImportError: wandb = None NORM_ITER_LEN = 100 def parse_option(): parser = argparse.ArgumentParser( 'Swin Transformer training and evaluation script', add_help=False) parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file', ) parser.add_argument( "--opts", help="Modify config options by adding 'KEY VALUE' pairs. ", default=None, nargs='+', ) # easy config modification parser.add_argument('--batch-size', type=int, help="batch size for single GPU") parser.add_argument('--data-path', type=str, help='path to dataset') parser.add_argument('--pretrained', help='pretrained weight from checkpoint, could be imagenet22k pretrained weight') parser.add_argument('--resume', help='resume from checkpoint') parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps") parser.add_argument('--use-checkpoint', action='store_true', help="whether to use gradient checkpointing to save memory") parser.add_argument('--disable_amp', action='store_true', help='Disable pytorch amp') parser.add_argument('--output', default='output', type=str, metavar='PATH', help='root of output folder, the full path is <output>/<model_name>/<tag> (default: output)') parser.add_argument('--tag', help='tag of experiment') parser.add_argument('--eval', action='store_true', help='Perform evaluation only') parser.add_argument('--throughput', action='store_true', help='Test throughput only') parser.add_argument('--use-sync-bn', action='store_true', default=False, help='sync bn') parser.add_argument('--use-wandb', action='store_true', default=False, help='use wandb to record log') # distributed training parser.add_argument("--local_rank", type=int, help='local rank for DistributedDataParallel') # NAS parser.add_argument("--lsss", action='store_true', help = 'train only the local supernet', default=False) parser.add_argument("--lsss-bid", type = int, help = "block id for the target transformer blocks", default = -1) args = parser.parse_args() config = get_config(args) return args, config def main(args, config): dataset_train, dataset_val, data_loader_train, data_loader_val, mixup_fn = build_loader( config) supernet_config = config.NAS.SEARCH_SPACE smallest_config = [] for ratios in supernet_config: smallest_config.append(ratios[0]) logger.info(f"Creating model:{config.MODEL.TYPE}/{config.MODEL.NAME}") model = build_model(config) model.cuda() if args.use_sync_bn: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) logger.info(str(model)) optimizer = build_optimizer(config, model) if 'classic' in config.MODEL.TYPE: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[config.LOCAL_RANK], broadcast_buffers=False, find_unused_parameters = True) else: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[config.LOCAL_RANK], broadcast_buffers=False, find_unused_parameters = False) loss_scaler = NativeScalerWithGradNormCount() model_without_ddp = model.module low_rank_search_space = build_low_rank_search_space(args, config) if config.NAS.ENABLE: if config.NAS.INIT_CONFIG is None: cfg = low_rank_search_space.get_smallest_config() else: cfg = config.NAS.INIT_CONFIG model_without_ddp.set_sample_config(cfg) if config.NAS.LSSS.ENABLE: logger.info(f"=> Now training the local supernet of 
block-{config.NAS.LSSS.BLOCK_ID}") else: logger.info(f"=> Srarting supernet training !") logger.info(f"") logger.info(f"=> Set init subnet config to be {cfg}") logger.info(str(model)) n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) logger.info(f"number of params: {n_parameters}") if hasattr(model_without_ddp, 'flops'): flops = model_without_ddp.flops() logger.info(f"number of GFLOPs: {flops / 1e9}") lr_scheduler = build_scheduler(config, optimizer, len( data_loader_train) // config.TRAIN.ACCUMULATION_STEPS) if config.DISTILL.ENABLED: # we disable MIXUP and CUTMIX when knowledge distillation assert len( config.DISTILL.TEACHER_LOGITS_PATH) > 0, "Please fill in DISTILL.TEACHER_LOGITS_PATH" criterion = torch.nn.CrossEntropyLoss(reduction='mean') else: if config.AUG.MIXUP > 0.: # smoothing is handled with mixup label transform criterion = SoftTargetCrossEntropy() elif config.MODEL.LABEL_SMOOTHING > 0.: criterion = LabelSmoothingCrossEntropy( smoothing=config.MODEL.LABEL_SMOOTHING) else: criterion = torch.nn.CrossEntropyLoss() max_accuracy = 0.0 if config.TRAIN.AUTO_RESUME:
resume_file = auto_resume_helper(config.OUTPUT)
11
2023-11-03 09:54:45+00:00
12k
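The record above ends its visible code at the line "if config.TRAIN.AUTO_RESUME:" and lists "resume_file = auto_resume_helper(config.OUTPUT)" as the line that follows; entry 11 of its context list (counting from 0) is the auto_resume_helper snippet that this line calls. The Python sketch below shows how such a resume branch typically continues in this kind of Swin/TinyViT-style training script. It reuses only helpers present in the context list (auto_resume_helper, load_checkpoint) and names already defined in the record's code (config, logger, model_without_ddp, optimizer, lr_scheduler, loss_scaler, low_rank_search_space); only the first line inside the branch is confirmed by the record, everything after it is an assumption.

    # Only the first line of this branch is taken from the record; the rest is a
    # hypothetical continuation assembled from helpers in the record's context list.
    if config.TRAIN.AUTO_RESUME:
        resume_file = auto_resume_helper(config.OUTPUT)   # latest *.pth in the output dir, or None
        if resume_file:
            config.defrost()
            config.MODEL.RESUME = resume_file             # hand the found checkpoint to load_checkpoint
            config.freeze()
            logger.info(f"auto resuming from {resume_file}")
        else:
            logger.info(f"no checkpoint found in {config.OUTPUT}, ignoring auto resume")

    if config.MODEL.RESUME:
        max_accuracy = load_checkpoint(config, model_without_ddp, optimizer, lr_scheduler,
                                       loss_scaler, logger, search_space=low_rank_search_space)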
fw-ai/fireworks_poe_bot
fireworks_poe_bot/__main__.py
[ { "identifier": "FireworksPoeTextBot", "path": "fireworks_poe_bot/fw_poe_text_bot.py", "snippet": "class FireworksPoeTextBot(PoeBot):\n def __init__(\n self,\n model: str,\n api_key: str,\n environment: str,\n deployment: str,\n server_version: str,\n allow_attachments: bool,\n input_image_size: int,\n prompt_truncate_len: int,\n max_tokens: int,\n system_prompt_override: Optional[str],\n additional_args: Optional[Dict[str, int | str]],\n chat_format: Optional[str],\n alpaca_instruction_msg: Optional[str],\n completion_async_method: Callable = ChatCompletion.acreate,\n ):\n super().__init__()\n self.model = model\n self.api_key = api_key\n self.environment = environment\n self.deployment = deployment\n self.server_version = server_version\n self.input_image_size = input_image_size\n self.completion_async_method = completion_async_method\n self.allow_attachments = allow_attachments\n self.prompt_truncate_len = prompt_truncate_len\n self.max_tokens = max_tokens\n self.chat_format = chat_format\n self.alpaca_instruction_msg = alpaca_instruction_msg\n self.system_prompt_override = system_prompt_override\n self.additional_args = additional_args or {}\n\n def _log_warn(self, payload: Dict):\n payload = copy.copy(payload)\n payload.update(\n {\n \"severity\": \"WARNING\",\n \"environment\": self.environment,\n \"deployment\": self.deployment,\n \"model\": self.model,\n \"server_version\": self.server_version,\n }\n )\n log_warn(payload)\n\n def _log_info(self, payload: Dict):\n payload = copy.copy(payload)\n payload.update(\n {\n \"severity\": \"INFO\",\n \"environment\": self.environment,\n \"deployment\": self.deployment,\n \"model\": self.model,\n \"server_version\": self.server_version,\n }\n )\n log_info(payload)\n\n async def download_image_and_encode_to_base64(\n self,\n url: str,\n ) -> str:\n async with httpx.AsyncClient() as client:\n image_download_start = time.perf_counter()\n r = await client.get(url)\n image_download_end = time.perf_counter()\n if r.status_code == 200:\n resize_encode_start = time.perf_counter()\n pil_img = Image.open(io.BytesIO(r.content))\n pil_img = pil_img.convert(\"RGB\")\n width, height = pil_img.size\n if width >= height:\n new_size = (\n self.input_image_size,\n int(height * self.input_image_size / width),\n )\n else:\n new_size = (\n int(width * self.input_image_size / height),\n self.input_image_size,\n )\n pil_img_resized = pil_img.resize(new_size)\n buffered = io.BytesIO()\n pil_img_resized.save(buffered, format=\"JPEG\")\n img_buffer = buffered.getvalue()\n img = \"data:image/jpeg;base64,{}\".format(\n base64.b64encode(img_buffer).decode(\"utf-8\")\n )\n resize_encode_end = time.perf_counter()\n self._log_info(\n {\n \"download_image_ms\": int(\n (image_download_end - image_download_start) * 1000\n ),\n \"encode_image_ms\": int(\n (resize_encode_end - resize_encode_start) * 1000\n ),\n \"url\": url,\n }\n )\n return img\n raise Exception(f\"Unable to download image, error code {r.status_code}\")\n\n async def get_response(\n self, query: QueryRequest\n ) -> AsyncIterable[Union[PartialResponse, ServerSentEvent]]:\n if len(query.query) == 0:\n yield ErrorResponse(allow_retry=False, text=\"Empty query\")\n return\n\n orig_api_key = fireworks.client.api_key\n fireworks.client.api_key = self.api_key\n try:\n start_t = time.time()\n messages: List[ChatMessage] = []\n\n cumulative_image_size_mb = 0\n for protocol_message in query.query:\n log_msg = protocol_message.dict()\n\n # OpenAI/Fireworks use the \"assistant\" role for the LLM, but Poe uses the\n # 
\"bot\" role. Replace that one. Otherwise, ignore the role\n if protocol_message.role not in {\"system\", \"user\", \"bot\"}:\n self._log_warn({\"msg\": \"Unknown role\", **log_msg})\n continue\n if protocol_message.content_type not in {\"text/plain\", \"text/markdown\"}:\n self._log_warn({\"msg\": \"Unknown content type\", **log_msg})\n continue\n # TODO: support protocol_message.feedback and protocol_message.attachments\n # if needed\n img_base64 = None\n if protocol_message.role == \"bot\":\n role = \"assistant\"\n else:\n role = protocol_message.role\n if protocol_message.attachments and protocol_message.attachments[\n 0\n ].content_type in [\"image/png\", \"image/jpeg\"]:\n try:\n img_base64 = await self.download_image_and_encode_to_base64(\n protocol_message.attachments[0].url\n )\n except Exception as e:\n yield ErrorResponse(allow_retry=False, text=str(e))\n raise RuntimeError(str(e))\n\n if img_base64:\n if cumulative_image_size_mb > 8:\n # Apigee has a limit of 10MB for payload, we set image total limit to 8MB\n yield ErrorResponse(\n allow_retry=False, text=\"The total image size is too big\"\n )\n raise RuntimeError(\"The total image size is too big\")\n messages.append(\n {\n \"role\": role,\n \"content\": [\n {\"type\": \"text\", \"text\": protocol_message.content},\n {\n \"type\": \"image_url\",\n \"image_url\": {\"url\": img_base64},\n },\n ],\n }\n )\n cumulative_image_size_mb += len(img_base64) / 1024 / 1024\n else:\n messages.append({\"role\": role, \"content\": protocol_message.content})\n\n if self.system_prompt_override is not None:\n system_prompt_msg = None\n for msg in messages:\n if msg[\"role\"] == \"system\":\n system_prompt_msg = msg\n break\n if system_prompt_msg is None:\n system_prompt_msg = {\n \"role\": \"system\",\n }\n messages.insert(0, system_prompt_msg)\n\n system_prompt_msg[\"content\"] = [\n {\"type\": \"text\", \"text\": self.system_prompt_override},\n ]\n\n if self.chat_format == \"alpaca\":\n # Discard all messages except \"system\" and the last \"user\"\n # message\n system_message = None\n user_message = None\n for msg in messages:\n if msg[\"role\"] == \"system\":\n system_message = msg\n elif msg[\"role\"] == \"user\":\n user_message = msg\n\n new_messages = []\n if system_message is not None:\n new_messages.append(system_message)\n # Insert instruction message, if applicable\n if self.alpaca_instruction_msg is not None:\n new_messages.append(\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"text\", \"text\": self.alpaca_instruction_msg}\n ],\n }\n )\n if user_message is not None:\n user_message[\"role\"] = \"input\"\n # HACKS: move the image to the instruction message\n if isinstance(user_message[\"content\"], list):\n content_non_image = [x for x in user_message['content'] if (not isinstance(x, dict)) or x[\"type\"] != \"image_url\"]\n content_image = [x for x in user_message['content'] if isinstance(x, dict) and x[\"type\"] == \"image_url\"]\n if content_image:\n new_messages[-1][\"content\"].append(content_image[0])\n user_message[\"content\"] = content_non_image\n new_messages.append(user_message)\n else:\n if user_message is not None:\n new_messages.append(user_message)\n messages = new_messages\n\n self._log_info(\n {\n \"msg\": \"Request received\",\n **query.dict(),\n }\n )\n\n if self.chat_format != \"alpaca\":\n # The poe servers send us arbitrary lists of messages. We need to do a few things\n # to normalize for our chat completion API:\n # 1. Ensure that all assistant messages are preceded by a user message\n # 2. 
Merge adjacent messages from the same role\n # 3. Ensure that the last message is a user message\n\n # Ensure that all assistant messages are preceded by a user message\n for i in range(len(messages) - 1, -1, -1):\n if messages[i][\"role\"] == \"assistant\" and (\n i == 0 or messages[i - 1][\"role\"] != \"user\"\n ):\n self._log_warn(\n {\n \"msg\": f\"Assistant message {messages[i]} not preceded by user message\"\n }\n )\n messages.insert(i, {\"role\": \"user\", \"content\": \"\"})\n\n # Merge adjacent messages from the same role\n merged_messages = []\n\n # Now there could be images in the messages, in which case the message content is a list\n def merge_messages_groups(\n message_group: List[Union[str, List[Dict[str, Any]]]]\n ) -> Union[str, List[Dict[str, Any]]]:\n text = []\n images = []\n for msg in message_group:\n if isinstance(msg, str):\n text.append(msg)\n elif isinstance(msg, list):\n assert msg[0][\"type\"] == \"text\"\n text.append(msg[0][\"text\"])\n images.extend(msg[1:])\n if images:\n return [{\"type\": \"text\", \"text\": \" \".join(text)}, *images]\n return \" \".join(text)\n\n for role, group in groupby(messages, key=lambda x: x[\"role\"]):\n content = merge_messages_groups([message[\"content\"] for message in group])\n merged_messages.append({\"role\": role, \"content\": content})\n\n messages = merged_messages\n\n # Ensure last message is a user message\n if messages[-1][\"role\"] != \"user\":\n self._log_warn({\"msg\": f\"Last message {messages[-1]} not a user message\"})\n messages.append({\"role\": \"user\", \"content\": \"\"})\n\n additional_args = copy.deepcopy(self.additional_args)\n if \"stop\" in additional_args:\n stop_seqs = additional_args[\"stop\"]\n additional_args.pop(\"stop\")\n else:\n stop_seqs = query.stop_sequences[:4]\n generated_len = 0\n complete_response = \"\"\n async for response in self.completion_async_method(\n model=self.model,\n messages=messages,\n stream=True,\n request_timeout=600,\n temperature=query.temperature,\n stop=stop_seqs,\n max_tokens=self.max_tokens,\n prompt_truncate_len=self.prompt_truncate_len,\n **additional_args,\n ):\n # Step 3: Transform the CompletionStreamResponse into PartialResponse format\n for choice in response.choices:\n assert isinstance(choice, ChatCompletionResponseStreamChoice)\n if choice.delta.content is None:\n continue\n\n generated_len += len(choice.delta.content)\n complete_response += choice.delta.content\n yield PartialResponse(\n text=choice.delta.content,\n raw_response=response,\n request_id=response.id,\n )\n\n end_t = time.time()\n elapsed_sec = end_t - start_t\n self._log_info(\n {\n \"severity\": \"INFO\",\n \"msg\": \"Request completed\",\n \"query\": query.dict(),\n \"response\": complete_response,\n \"generated_len\": generated_len,\n \"elapsed_sec\": elapsed_sec,\n }\n )\n yield ServerSentEvent(event=\"done\")\n return\n except Exception as e:\n end_t = time.time()\n log_error(\n {\n \"severity\": \"ERROR\",\n \"msg\": \"Invalid request\",\n \"error\": \"\\n\".join(traceback.format_exception(e)),\n \"elapsed_sec\": end_t - start_t,\n \"query\": query.dict(),\n }\n )\n if \"prompt is too long\" in str(e):\n error_type = \"user_message_too_long\"\n else:\n error_type = None\n yield ErrorResponse(allow_retry=False, error_type=error_type, text=str(e))\n return\n finally:\n fireworks.client.api_key = orig_api_key\n\n async def get_settings(self, setting: SettingsRequest) -> SettingsResponse:\n return SettingsResponse(allow_attachments=self.allow_attachments)\n\n async def on_feedback(self, 
feedback_request: ReportFeedbackRequest) -> None:\n \"\"\"Override this to record feedback from the user.\"\"\"\n pass\n\n async def on_error(self, error_request: ReportErrorRequest) -> None:\n \"\"\"Override this to record errors from the Poe server.\"\"\"\n log_error(\n {\n \"severity\": \"ERROR\",\n \"msg\": \"Error reported\",\n **error_request.dict(),\n }\n )" }, { "identifier": "FireworksPoeImageBot", "path": "fireworks_poe_bot/fw_poe_image_bot.py", "snippet": "class FireworksPoeImageBot(PoeBot):\n def __init__(\n self,\n model: str,\n api_key: str,\n environment: str,\n deployment: str,\n server_version: str,\n gcs_bucket_name: str,\n num_steps: int,\n multi_turn: bool\n ):\n super().__init__()\n self.model = model\n self.api_key = api_key\n self.environment = environment\n self.deployment = deployment\n self.server_version = server_version\n\n model_atoms = model.split(\"/\")\n if len(model_atoms) != 4:\n raise ValueError(\n f\"Expected model name to be in the form accounts/{{modelname}}/models/{{model}}, but got {model}\"\n )\n\n if model_atoms[0] != \"accounts\" or model_atoms[2] != \"models\":\n raise ValueError(\n f\"Expected model name to be in the form accounts/{{modelname}}/models/{{model}}, but got {model}\"\n )\n\n self.account = model_atoms[1]\n self.model = model_atoms[3]\n\n self.client = ImageInference(account=self.account, model=self.model)\n\n self.num_steps = num_steps\n\n self.gcs_bucket_name = gcs_bucket_name\n self.multi_turn = multi_turn\n\n def _log_warn(self, payload: Dict):\n payload = copy.copy(payload)\n payload.update(\n {\n \"severity\": \"WARNING\",\n \"environment\": self.environment,\n \"deployment\": self.deployment,\n \"model\": self.model,\n \"server_version\": self.server_version,\n }\n )\n log_warn(payload)\n\n def _log_info(self, payload: Dict):\n payload = copy.copy(payload)\n payload.update(\n {\n \"severity\": \"INFO\",\n \"environment\": self.environment,\n \"deployment\": self.deployment,\n \"model\": self.model,\n \"server_version\": self.server_version,\n }\n )\n log_info(payload)\n\n async def get_response(\n self, query: QueryRequest\n ) -> AsyncIterable[Union[PartialResponse, ServerSentEvent]]:\n orig_api_key = self.client.api_key\n fireworks.client.api_key = self.api_key\n try:\n start_t = time.time()\n\n if len(query.query) == 0:\n yield ErrorResponse(allow_retry=False, text=\"Empty query\")\n return\n\n messages: List[ChatMessage] = []\n\n for protocol_message in query.query:\n # OpenAI/Fireworks use the \"assistant\" role for the LLM, but Poe uses the\n # \"bot\" role. Replace that one. Otherwise, ignore the role\n if protocol_message.role not in {\"system\", \"user\", \"bot\"}:\n self._log_warn({\"msg\": \"Unknown role\", **protocol_message})\n continue\n if protocol_message.content_type not in {\"text/plain\", \"text/markdown\"}:\n self._log_warn({\"msg\": \"Unknown content type\", **protocol_message})\n continue\n # TODO: support protocol_message.feedback and protocol_message.attachments\n # if needed\n if protocol_message.role == \"bot\":\n role = \"assistant\"\n else:\n role = protocol_message.role\n messages.append({\"role\": role, \"content\": protocol_message.content})\n\n self._log_info(\n {\n \"msg\": \"Request received\",\n **query.dict(),\n }\n )\n\n # The poe servers send us arbitrary lists of messages. We need to do a few things\n # to normalize for our chat completion API:\n # 1. Ensure that all assistant messages are preceded by a user message\n # 2. Merge adjacent messages from the same role\n # 3. 
Ensure that the last message is a user message\n\n # Ensure that all assistant messages are preceded by a user message\n for i in range(len(messages) - 1, -1, -1):\n if messages[i][\"role\"] == \"assistant\" and (\n i == 0 or messages[i - 1][\"role\"] != \"user\"\n ):\n self._log_warn(\n {\n \"msg\": f\"Assistant message {messages[i]} not preceded by user message\"\n }\n )\n messages.insert(i, {\"role\": \"user\", \"content\": \"\"})\n\n # Merge adjacent messages from the same role\n merged_messages = []\n\n for role, group in groupby(messages, key=lambda x: x[\"role\"]):\n content = \" \".join(message[\"content\"] for message in group)\n merged_messages.append({\"role\": role, \"content\": content})\n\n messages = merged_messages\n\n # Ensure last message is a user message\n if messages[-1][\"role\"] != \"user\":\n self._log_warn({\"msg\": f\"Last message {messages[-1]} not a user message\"})\n messages.append({\"role\": \"user\", \"content\": \"\"})\n\n # generated_len = 0\n\n assert messages[-1][\"role\"] == \"user\"\n prompt = messages[-1][\"content\"]\n\n # TODO: support specifying aspect ratio :)\n\n control_img_uri = None\n for messages in reversed(messages[:-1]):\n if messages[\"role\"] == \"assistant\" and messages[\"content\"].startswith(\n \"![image](\"\n ):\n control_img_uri = messages[\"content\"][9:-1]\n\n if not self.multi_turn or control_img_uri is None:\n answer: Answer = await self.client.text_to_image_async(\n prompt=prompt,\n cfg_scale=7,\n height=1024,\n width=1024,\n sampler=None,\n steps=self.num_steps,\n seed=0,\n safety_check=True,\n output_image_format=\"JPG\",\n )\n else:\n downloaded_image = self._download_image(control_img_uri)\n\n # TODO: don't hardcode this\n min_val, max_val = 100, 200\n image = cv2.Canny(np.array(downloaded_image), min_val, max_val)\n image = image[:, :, None]\n image = np.concatenate([image, image, image], axis=2)\n image = Image.fromarray(image)\n\n answer: Answer = await self.client.control_net_async(\n control_image=image,\n control_net_name=\"canny\",\n conditioning_scale=0.5,\n prompt=prompt,\n cfg_scale=7,\n sampler=None,\n steps=self.num_steps,\n seed=0,\n safety_check=True,\n output_image_format=\"JPG\",\n # Add additional parameters here as necessary\n )\n end_t_inference = time.time()\n start_t_encode = time.time()\n\n if answer.finish_reason == \"CONTENT_FILTERED\":\n yield self.text_event(text=\"Potentially sensitive content detected\")\n\n public_image_url = self._upload_image_to_gcs(\n answer.image, self.gcs_bucket_name\n )\n response_text = f\"![image]({public_image_url})\"\n\n end_t = time.time()\n elapsed_sec = end_t - start_t\n self._log_info(\n {\n \"severity\": \"INFO\",\n \"msg\": \"Request completed\",\n **query.dict(),\n \"response\": response_text,\n \"elapsed_sec\": elapsed_sec,\n \"elapsed_sec_inference\": end_t_inference - start_t,\n \"elapsed_sec_upload\": end_t - start_t_encode,\n }\n )\n yield PartialResponse(text=response_text)\n yield ServerSentEvent(event=\"done\")\n return\n except Exception as e:\n end_t = time.time()\n log_error(\n {\n \"severity\": \"ERROR\",\n \"msg\": \"Invalid request\",\n \"error\": \"\\n\".join(traceback.format_exception(e)),\n \"elapsed_sec\": end_t - start_t,\n **query.dict(),\n }\n )\n if \"prompt is too long\" in str(e):\n error_type = \"user_message_too_long\"\n else:\n error_type = None\n yield ErrorResponse(allow_retry=False, error_type=error_type, text=str(e))\n return\n finally:\n fireworks.client.api_key = orig_api_key\n\n # Function to upload a PIL Image to an S3 bucket 
with a presigned URL\n def _upload_image_to_s3_with_ttl(\n self, bucket_name, object_name, image: Image, expiration=600\n ):\n \"\"\"\n Upload a PIL Image to an S3 bucket with TTL by generating a presigned URL.\n\n :param bucket_name: String name of the bucket to which the image is uploaded.\n :param object_name: S3 object name. If not specified then file_name is used.\n :param image: PIL Image object to be uploaded.\n :param expiration: Time in seconds for the presigned URL to remain valid.\n \"\"\"\n # In-memory binary streams\n in_mem_file = io.BytesIO()\n\n # Save the PIL image to in-memory file as JPEG\n image.save(in_mem_file, format=\"JPEG\")\n in_mem_file.seek(0) # Reset file pointer to the beginning\n\n # Upload the image to S3\n # self.s3_client.upload_fileobj(in_mem_file, bucket_name, object_name)\n self.s3_client.put_object(\n Bucket=self.s3_bucket_name,\n Key=object_name,\n Body=in_mem_file,\n ContentType=\"image/jpeg\",\n )\n\n # Generate a presigned URL for the S3 object\n url = self.s3_client.generate_presigned_url(\n \"get_object\",\n Params={\"Bucket\": bucket_name, \"Key\": object_name},\n ExpiresIn=expiration,\n )\n\n return url\n\n def _upload_image_to_gcs(self, image: Image, bucket_name: str):\n \"\"\"Uploads a given PIL.Image to a GCS bucket.\"\"\"\n # Generate a (statistically) unique filename with a uuid4\n random_uuid = str(uuid.uuid4()).replace(\"-\", \"\")\n filename = f\"{random_uuid}.jpg\"\n\n # Initialize the GCS client\n client = storage.Client()\n bucket = client.get_bucket(bucket_name)\n\n # Convert the PIL.Image to bytes\n img_byte_arr = io.BytesIO()\n image.save(img_byte_arr, format=\"JPEG\")\n img_byte_arr = img_byte_arr.getvalue()\n\n # Create a new blob (i.e., object) in the bucket and upload the image bytes\n blob = bucket.blob(filename)\n blob.upload_from_string(img_byte_arr, content_type=f\"image/jpeg\")\n\n blob.make_public()\n\n # The public URL can be accessed with the `public_url` attribute\n public_url = blob.public_url\n\n return public_url\n\n def _download_image(self, image_url):\n # Send an HTTP GET request to the image URL\n response = requests.get(image_url)\n\n # Check if the request was successful\n if response.status_code == 200:\n # Read the image content into an in-memory bytes buffer\n image_bytes = io.BytesIO(response.content)\n\n # Use Pillow to open the image from the bytes buffer\n img = Image.open(image_bytes)\n\n return img\n else:\n # If the request failed, raise an HTTPError with the response\n response.raise_for_status()\n\n async def get_settings(self, setting: SettingsRequest) -> SettingsResponse:\n \"\"\"Override this to return non-standard settings.\"\"\"\n return SettingsResponse()\n\n async def on_feedback(self, feedback_request: ReportFeedbackRequest) -> None:\n \"\"\"Override this to record feedback from the user.\"\"\"\n pass\n\n async def on_error(self, error_request: ReportErrorRequest) -> None:\n \"\"\"Override this to record errors from the Poe server.\"\"\"\n log_error(\n {\n \"severity\": \"ERROR\",\n \"msg\": \"Error reported\",\n **error_request.dict(),\n }\n )" }, { "identifier": "FireworksPoeQRBot", "path": "fireworks_poe_bot/fw_poe_qr_bot.py", "snippet": "class FireworksPoeQRBot(PoeBot):\n def __init__(\n self,\n model: str,\n api_key: str,\n environment: str,\n deployment: str,\n server_version: str,\n gcs_bucket_name: str,\n conditioning_scale: float,\n default_cfg_scale: float,\n ):\n super().__init__()\n self.model = model\n self.api_key = api_key\n self.environment = environment\n self.deployment = 
deployment\n self.server_version = server_version\n self.default_cfg_scale = default_cfg_scale if default_cfg_scale is not None else 8\n\n model_atoms = model.split(\"/\")\n if len(model_atoms) != 4:\n raise ValueError(\n f\"Expected model name to be in the form accounts/{{modelname}}/models/{{model}}, but got {model}\"\n )\n\n if model_atoms[0] != \"accounts\" or model_atoms[2] != \"models\":\n raise ValueError(\n f\"Expected model name to be in the form accounts/{{modelname}}/models/{{model}}, but got {model}\"\n )\n\n self.account = model_atoms[1]\n self.model = model_atoms[3]\n\n self.client = ImageInference(account=self.account, model=self.model)\n\n self.gcs_bucket_name = gcs_bucket_name\n self.conditioning_scale = conditioning_scale\n\n def _log_warn(self, payload: Dict):\n payload = copy.copy(payload)\n payload.update(\n {\n \"severity\": \"WARNING\",\n \"environment\": self.environment,\n \"deployment\": self.deployment,\n \"model\": self.model,\n \"server_version\": self.server_version,\n }\n )\n log_warn(payload)\n\n def _log_info(self, payload: Dict):\n payload = copy.copy(payload)\n payload.update(\n {\n \"severity\": \"INFO\",\n \"environment\": self.environment,\n \"deployment\": self.deployment,\n \"model\": self.model,\n \"server_version\": self.server_version,\n }\n )\n log_info(payload)\n\n async def get_response(\n self, query: QueryRequest\n ) -> AsyncIterable[Union[PartialResponse, ServerSentEvent]]:\n orig_api_key = self.client.api_key\n fireworks.client.api_key = self.api_key\n try:\n start_t = time.time()\n\n if len(query.query) == 0:\n yield ErrorResponse(allow_retry=False, text=\"Empty query\")\n raise\n\n messages: List[ChatMessage] = []\n\n for protocol_message in query.query:\n # OpenAI/Fireworks use the \"assistant\" role for the LLM, but Poe uses the\n # \"bot\" role. Replace that one. Otherwise, ignore the role\n if protocol_message.role not in {\"system\", \"user\", \"bot\"}:\n self._log_warn({\"msg\": \"Unknown role\", **protocol_message})\n continue\n if protocol_message.content_type not in {\"text/plain\", \"text/markdown\"}:\n self._log_warn({\"msg\": \"Unknown content type\", **protocol_message})\n continue\n # TODO: support protocol_message.feedback and protocol_message.attachments\n # if needed\n if protocol_message.role == \"bot\":\n role = \"assistant\"\n else:\n role = protocol_message.role\n messages.append({\"role\": role, \"content\": protocol_message.content})\n\n self._log_info(\n {\n \"msg\": \"Request received\",\n **query.dict(),\n }\n )\n\n # The poe servers send us arbitrary lists of messages. We need to do a few things\n # to normalize for our chat completion API:\n # 1. Ensure that all assistant messages are preceded by a user message\n # 2. Merge adjacent messages from the same role\n # 3. 
Ensure that the last message is a user message\n\n # Ensure that all assistant messages are preceded by a user message\n for i in range(len(messages) - 1, -1, -1):\n if messages[i][\"role\"] == \"assistant\" and (\n i == 0 or messages[i - 1][\"role\"] != \"user\"\n ):\n self._log_warn(\n {\n \"msg\": f\"Assistant message {messages[i]} not preceded by user message\"\n }\n )\n messages.insert(i, {\"role\": \"user\", \"content\": \"\"})\n\n # Merge adjacent messages from the same role\n merged_messages = []\n\n for role, group in groupby(messages, key=lambda x: x[\"role\"]):\n content = \" \".join(message[\"content\"] for message in group)\n merged_messages.append({\"role\": role, \"content\": content})\n\n messages = merged_messages\n\n # Ensure last message is a user message\n if messages[-1][\"role\"] != \"user\":\n self._log_warn({\"msg\": f\"Last message {messages[-1]} not a user message\"})\n messages.append({\"role\": \"user\", \"content\": \"\"})\n\n # generated_len = 0\n\n assert messages[-1][\"role\"] == \"user\"\n prompt = messages[-1][\"content\"]\n\n try:\n prompt, qr_data, qr_strength, prompt_strength, model = parse_input(prompt, self.conditioning_scale, self.default_cfg_scale)\n except Exception as e:\n yield self.text_event(text=f\"Error parsing input: {e}\")\n return\n\n if model == \"sdxl\":\n self.client.model = \"stable-diffusion-xl-1024-v1-0\"\n elif model == \"sdv1.5\":\n self.client.model = \"stable-diffusion-v1-5\"\n else:\n yield self.text_event(text=f\"Unknown model: {model}. Model must be one of 'sdxl' or 'sdv1.5'.\")\n return\n\n qr_image = gen_qr_code(qr_data)\n\n answer: Answer = await self.client.control_net_async(\n control_image=qr_image,\n control_net_name=\"qr\",\n conditioning_scale=qr_strength,\n prompt=prompt,\n cfg_scale=prompt_strength,\n sampler=None,\n steps=25,\n seed=0,\n safety_check=False,\n output_image_format=\"JPG\",\n # Add additional parameters here as necessary\n )\n\n end_t_inference = time.time()\n start_t_encode = time.time()\n\n if answer.finish_reason == \"CONTENT_FILTERED\":\n yield self.text_event(text=\"Potentially sensitive content detected\")\n return\n\n public_image_url = self._upload_image_to_gcs(\n answer.image, self.gcs_bucket_name\n )\n\n response_text = f\"![{prompt}]({public_image_url})\"\n\n end_t = time.time()\n elapsed_sec = end_t - start_t\n self._log_info(\n {\n \"severity\": \"INFO\",\n \"msg\": \"Request completed\",\n **query.dict(),\n \"prompt\": prompt,\n \"qr_data\": qr_data,\n \"qr_strength\": qr_strength,\n \"prompt_strength\": prompt_strength,\n \"response\": response_text,\n \"elapsed_sec\": elapsed_sec,\n \"elapsed_sec_inference\": end_t_inference - start_t,\n \"elapsed_sec_upload\": end_t - start_t_encode,\n }\n )\n yield PartialResponse(text=response_text)\n yield ServerSentEvent(event=\"done\")\n return\n except Exception as e:\n end_t = time.time()\n log_error(\n {\n \"severity\": \"ERROR\",\n \"msg\": \"Invalid request\",\n \"error\": \"\\n\".join(traceback.format_exception(e)),\n \"elapsed_sec\": end_t - start_t,\n **query.dict(),\n }\n )\n if \"prompt is too long\" in str(e):\n error_type = \"user_message_too_long\"\n else:\n error_type = None\n yield ErrorResponse(allow_retry=False, error_type=error_type, text=str(e))\n return\n finally:\n fireworks.client.api_key = orig_api_key\n\n # Function to upload a PIL Image to an S3 bucket with a presigned URL\n def _upload_image_to_s3_with_ttl(\n self, bucket_name, object_name, image: Image, expiration=600\n ):\n \"\"\"\n Upload a PIL Image to an S3 bucket 
with TTL by generating a presigned URL.\n\n :param bucket_name: String name of the bucket to which the image is uploaded.\n :param object_name: S3 object name. If not specified then file_name is used.\n :param image: PIL Image object to be uploaded.\n :param expiration: Time in seconds for the presigned URL to remain valid.\n \"\"\"\n # In-memory binary streams\n in_mem_file = io.BytesIO()\n\n # Save the PIL image to in-memory file as JPEG\n image.save(in_mem_file, format=\"JPEG\")\n in_mem_file.seek(0) # Reset file pointer to the beginning\n\n # Upload the image to S3\n # self.s3_client.upload_fileobj(in_mem_file, bucket_name, object_name)\n self.s3_client.put_object(\n Bucket=self.s3_bucket_name,\n Key=object_name,\n Body=in_mem_file,\n ContentType=\"image/jpeg\",\n )\n\n # Generate a presigned URL for the S3 object\n url = self.s3_client.generate_presigned_url(\n \"get_object\",\n Params={\"Bucket\": bucket_name, \"Key\": object_name},\n ExpiresIn=expiration,\n )\n\n return url\n\n def _upload_image_to_gcs(self, image: Image, bucket_name: str):\n \"\"\"Uploads a given PIL.Image to a GCS bucket.\"\"\"\n # Generate a (statistically) unique filename with a uuid4\n random_uuid = str(uuid.uuid4()).replace(\"-\", \"\")\n filename = f\"{random_uuid}.jpg\"\n\n # Initialize the GCS client\n client = storage.Client()\n bucket = client.get_bucket(bucket_name)\n\n # Convert the PIL.Image to bytes\n img_byte_arr = io.BytesIO()\n image.save(img_byte_arr, format=\"JPEG\")\n img_byte_arr = img_byte_arr.getvalue()\n\n # Create a new blob (i.e., object) in the bucket and upload the image bytes\n blob = bucket.blob(filename)\n blob.upload_from_string(img_byte_arr, content_type=f\"image/jpeg\")\n\n blob.make_public()\n\n # The public URL can be accessed with the `public_url` attribute\n public_url = blob.public_url\n\n return public_url\n\n def _download_image(self, image_url):\n # Send an HTTP GET request to the image URL\n response = requests.get(image_url)\n\n # Check if the request was successful\n if response.status_code == 200:\n # Read the image content into an in-memory bytes buffer\n image_bytes = io.BytesIO(response.content)\n\n # Use Pillow to open the image from the bytes buffer\n img = Image.open(image_bytes)\n\n return img\n else:\n # If the request failed, raise an HTTPError with the response\n response.raise_for_status()\n\n async def get_settings(self, setting: SettingsRequest) -> SettingsResponse:\n \"\"\"Override this to return non-standard settings.\"\"\"\n return SettingsResponse()\n\n async def on_feedback(self, feedback_request: ReportFeedbackRequest) -> None:\n \"\"\"Override this to record feedback from the user.\"\"\"\n pass\n\n async def on_error(self, error_request: ReportErrorRequest) -> None:\n \"\"\"Override this to record errors from the Poe server.\"\"\"\n log_error(\n {\n \"severity\": \"ERROR\",\n \"msg\": \"Error reported\",\n **error_request.dict(),\n }\n )" }, { "identifier": "UVICORN_LOGGING_CONFIG", "path": "fireworks_poe_bot/logging.py", "snippet": "UVICORN_LOGGING_CONFIG = copy.deepcopy(uvicorn.config.LOGGING_CONFIG)" }, { "identifier": "LoggingPlugin", "path": "fireworks_poe_bot/plugin.py", "snippet": "class LoggingPlugin(ABC):\n @abstractmethod\n def log_warn(self, payload: Dict[str, Any]):\n ...\n\n @abstractmethod\n def log_info(self, payload: Dict[str, Any]):\n ...\n\n @abstractmethod\n def log_error(self, payload: Dict[str, Any]):\n ..." 
}, { "identifier": "register_logging_plugin", "path": "fireworks_poe_bot/plugin.py", "snippet": "def register_logging_plugin(plugin: LoggingPlugin):\n _LOGGING_PLUGINS.append(plugin)" }, { "identifier": "BOT_PLUGINS", "path": "fireworks_poe_bot/plugin.py", "snippet": "BOT_PLUGINS: List[_BotPlugin] = []" }, { "identifier": "log_info", "path": "fireworks_poe_bot/plugin.py", "snippet": "@abstractmethod\ndef log_info(self, payload: Dict[str, Any]):\n ..." }, { "identifier": "make_app", "path": "fireworks_poe_bot/fastapi_poe/base.py", "snippet": "def make_app(\n bots: Dict[str, PoeBot],\n access_key: str = \"\",\n *,\n api_key: str = \"\",\n allow_without_key: bool = False,\n) -> FastAPI:\n \"\"\"Create an app object. Arguments are as for run().\"\"\"\n app = FastAPI()\n app.add_exception_handler(RequestValidationError, exception_handler)\n\n global auth_key\n auth_key = _verify_access_key(\n access_key=access_key, api_key=api_key, allow_without_key=allow_without_key\n )\n\n def find_bot(account: str, model: str) -> PoeBot:\n bot_fqn = f\"accounts/{account}/models/{model}\"\n if bot_fqn not in bots:\n raise HTTPException(status_code=404, detail=f\"Bot {bot_fqn} not found\")\n return bots[bot_fqn]\n\n @app.get(\"/\")\n async def index() -> Response:\n # Default endpoint for health checks\n return HTMLResponse(\"It works!\")\n\n @app.get(\"/accounts/{account}/models/{model}\")\n async def index(account: str, model: str) -> Response:\n bot = find_bot(account, model)\n\n url = \"https://poe.com/create_bot?server=1\"\n return HTMLResponse(\n \"<html><body><h1>FastAPI Poe bot server</h1><p>Congratulations! Your server\"\n \" is running. To connect it to Poe, create a bot at <a\"\n f' href=\"{url}\">{url}</a>.</p></body></html>'\n )\n\n @app.post(\"/accounts/{account}/models/{model}\")\n async def poe_post(\n account: str, model: str, request: Dict[str, Any], dict=Depends(auth_user)\n ) -> Response:\n bot = find_bot(account, model)\n\n if request[\"type\"] == \"query\":\n return EventSourceResponse(\n bot.handle_query(\n QueryRequest.parse_obj(\n {\n **request,\n \"access_key\": auth_key or \"<missing>\",\n \"api_key\": auth_key or \"<missing>\",\n }\n )\n )\n )\n elif request[\"type\"] == \"settings\":\n return await bot.handle_settings(SettingsRequest.parse_obj(request))\n elif request[\"type\"] == \"report_feedback\":\n return await bot.handle_report_feedback(\n ReportFeedbackRequest.parse_obj(request)\n )\n elif request[\"type\"] == \"report_error\":\n return await bot.handle_report_error(ReportErrorRequest.parse_obj(request))\n else:\n raise HTTPException(status_code=501, detail=\"Unsupported request type\")\n\n # Uncomment this line to print out request and response\n # app.add_middleware(LoggingMiddleware)\n return app" } ]
from fireworks_poe_bot.fw_poe_text_bot import FireworksPoeTextBot from fireworks_poe_bot.fw_poe_image_bot import FireworksPoeImageBot from fireworks_poe_bot.fw_poe_qr_bot import FireworksPoeQRBot from fireworks_poe_bot.logging import UVICORN_LOGGING_CONFIG from fireworks_poe_bot.plugin import LoggingPlugin, register_logging_plugin, BOT_PLUGINS, log_info from dataclasses import dataclass from typing import Any, Dict from .fastapi_poe import make_app import argparse import uvicorn import logging import os import json
10,539
@dataclass class ServerArgs: host: str = "0.0.0.0" port: int = 80 config_file_path: str = "config.json" environment: str = "" deployment: str = "poe-omnibot" class PyLoggingPlugin(LoggingPlugin): def log_warn(self, payload: Dict[str, Any]): logging.warning(payload) def log_info(self, payload: Dict[str, Any]): logging.info(payload) def log_error(self, payload: Dict[str, Any]): logging.error(payload) def main(args=None): if args is None: parser = argparse.ArgumentParser( prog="fireworks_poe_bot", description=f""" Fireworks LLM Poe Server Bot Copyright (c) 2023 Fireworks.ai, Inc. and affiliates. """, ) # Server args. server_args = ServerArgs() server_group = parser.add_argument_group("server", "Server arguments") server_group.add_argument("--host", type=str, default=server_args.host) server_group.add_argument("-p", "--port", type=int, default=server_args.port) server_group.add_argument( "-c", "--config-file-path", type=str, default=server_args.config_file_path ) server_group.add_argument( "-e", "--environment", type=str, default=server_args.environment ) server_group.add_argument( "-d", "--deployment", type=str, default=server_args.deployment ) args = parser.parse_args() # Parse arguments. for k, v in vars(args).items(): for g in [server_args]: if hasattr(g, k): setattr(g, k, v) break else: assert k in ["print_supported_models"], f"Unknown argument {k}" # Register default logging plugin register_logging_plugin(PyLoggingPlugin()) # Load bots from config with open(args.config_file_path) as f: config = json.load(f) remaining_config_keys = set(config.keys()) bots = {} for plugin in BOT_PLUGINS: if plugin.config_key in config: remaining_config_keys.remove(plugin.config_key) for config_dict in config[plugin.config_key]: bot_config = plugin.BotConfigClass(**config_dict) model_fqn = bot_config.model_fqn ctor_dict = bot_config.dict() for k in list(ctor_dict.keys()): if k.startswith("SERVER_"): ctor_dict.pop(k) bots[model_fqn] = plugin.BotPluginClass( environment=args.environment, deployment=args.deployment, server_version="0.0.1", # FIXME: versioneer? **ctor_dict ) if len(remaining_config_keys) > 0: raise ValueError( f"Unknown config keys: {remaining_config_keys}, supported keys: {set([plugin.config_key for plugin in BOT_PLUGINS])}" ) log_info({'message': f"Loaded bots: {bots}"}) assert ( len(bots) > 0 ), "No bots specified, use --text-models or --image-models to specify models to serve"
@dataclass class ServerArgs: host: str = "0.0.0.0" port: int = 80 config_file_path: str = "config.json" environment: str = "" deployment: str = "poe-omnibot" class PyLoggingPlugin(LoggingPlugin): def log_warn(self, payload: Dict[str, Any]): logging.warning(payload) def log_info(self, payload: Dict[str, Any]): logging.info(payload) def log_error(self, payload: Dict[str, Any]): logging.error(payload) def main(args=None): if args is None: parser = argparse.ArgumentParser( prog="fireworks_poe_bot", description=f""" Fireworks LLM Poe Server Bot Copyright (c) 2023 Fireworks.ai, Inc. and affiliates. """, ) # Server args. server_args = ServerArgs() server_group = parser.add_argument_group("server", "Server arguments") server_group.add_argument("--host", type=str, default=server_args.host) server_group.add_argument("-p", "--port", type=int, default=server_args.port) server_group.add_argument( "-c", "--config-file-path", type=str, default=server_args.config_file_path ) server_group.add_argument( "-e", "--environment", type=str, default=server_args.environment ) server_group.add_argument( "-d", "--deployment", type=str, default=server_args.deployment ) args = parser.parse_args() # Parse arguments. for k, v in vars(args).items(): for g in [server_args]: if hasattr(g, k): setattr(g, k, v) break else: assert k in ["print_supported_models"], f"Unknown argument {k}" # Register default logging plugin register_logging_plugin(PyLoggingPlugin()) # Load bots from config with open(args.config_file_path) as f: config = json.load(f) remaining_config_keys = set(config.keys()) bots = {} for plugin in BOT_PLUGINS: if plugin.config_key in config: remaining_config_keys.remove(plugin.config_key) for config_dict in config[plugin.config_key]: bot_config = plugin.BotConfigClass(**config_dict) model_fqn = bot_config.model_fqn ctor_dict = bot_config.dict() for k in list(ctor_dict.keys()): if k.startswith("SERVER_"): ctor_dict.pop(k) bots[model_fqn] = plugin.BotPluginClass( environment=args.environment, deployment=args.deployment, server_version="0.0.1", # FIXME: versioneer? **ctor_dict ) if len(remaining_config_keys) > 0: raise ValueError( f"Unknown config keys: {remaining_config_keys}, supported keys: {set([plugin.config_key for plugin in BOT_PLUGINS])}" ) log_info({'message': f"Loaded bots: {bots}"}) assert ( len(bots) > 0 ), "No bots specified, use --text-models or --image-models to specify models to serve"
app = make_app(bots, allow_without_key=True)
8
2023-11-03 23:24:23+00:00
12k
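The row above ends just before the gold next line, `app = make_app(bots, allow_without_key=True)`. As a quick illustration of how the pieces in that row fit together, here is a minimal, hedged sketch that registers a custom logging plugin and serves an app the same way main() does. The JsonLinePlugin class, the empty bots placeholder, and the absolute import paths (derived from the identifier paths listed in the row's context) are illustrative assumptions, not code taken from the repository.

import json
import uvicorn
from typing import Any, Dict

# Import paths assumed from the context entries above (fireworks_poe_bot/plugin.py,
# fireworks_poe_bot/logging.py, fireworks_poe_bot/fastapi_poe/base.py).
from fireworks_poe_bot.plugin import LoggingPlugin, register_logging_plugin
from fireworks_poe_bot.logging import UVICORN_LOGGING_CONFIG
from fireworks_poe_bot.fastapi_poe import make_app


class JsonLinePlugin(LoggingPlugin):
    # Hypothetical plugin: emits one JSON object per log call, playing the
    # same role PyLoggingPlugin plays in main().
    def log_warn(self, payload: Dict[str, Any]):
        print(json.dumps({"severity": "WARNING", **payload}))

    def log_info(self, payload: Dict[str, Any]):
        print(json.dumps({"severity": "INFO", **payload}))

    def log_error(self, payload: Dict[str, Any]):
        print(json.dumps({"severity": "ERROR", **payload}))


register_logging_plugin(JsonLinePlugin())

# Placeholder: main() populates this dict from config.json via BOT_PLUGINS,
# keyed by fully-qualified model names.
bots = {}
app = make_app(bots, allow_without_key=True)

if __name__ == "__main__":
    # Host/port defaults mirror ServerArgs in the row above.
    uvicorn.run(app, host="0.0.0.0", port=80, log_config=UVICORN_LOGGING_CONFIG)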
Fsoft-AIC/LSDM
util/model_util.py
[ { "identifier": "SceneDiffusionModel", "path": "model/sdm.py", "snippet": "class SceneDiffusionModel(nn.Module):\n def __init__(self, seg_len=256, modality='text', clip_version='ViT-B/32', clip_dim=768, dropout=0.1, n_layer=6, n_head=8, f_vert=64, dim_ff=512,\n cat_emb=32, mesh_ds_dir=\"data/mesh_ds\", posa_path=None, latent_dim=128, cond_mask_prob=1.0, device=0, vert_dims=655, obj_cat=8, \n data_rep='rot6d', njoints=251, use_cuda=True, pcd_points=1024, pcd_dim=128, xyz_dim=3, max_cats=13, translation_params=12,\n pcd_backbone_type=\"PNT2\", human_backbone_type=\"POSA\", text_encoder_type=\"CLIP\", **kwargs) -> None:\n super().__init__()\n self.seg_len = seg_len\n self.pcd_points = pcd_points\n self.clip_version = clip_version\n self.clip_dim = clip_dim\n self.latent_dim = latent_dim\n self.pcd_dim = pcd_dim\n self.pcd_points = pcd_points\n self.xyz_dim = xyz_dim\n self.extract_dim = self.latent_dim\n self.dropout = dropout\n self.cond_mask_prob = cond_mask_prob\n self.data_rep = data_rep\n self.input_feats = vert_dims * obj_cat\n self.n_head = n_head\n self.translation_params = translation_params\n self.text_encoder_type = text_encoder_type\n self.device = \"cuda:{}\".format(device) if use_cuda else \"cpu\"\n\n # Setup modality for the model, e.g., text.\n self.modality = modality\n self._set_up_modality()\n\n # Setup timestep embedding layer\n self.sequence_pos_encoder = PositionalEncoding(self.latent_dim, self.dropout, device=self.device)\n self.embed_timestep = TimestepEmbedder(self.latent_dim, self.sequence_pos_encoder, device=self.device)\n\n # Setup embedding layer for modality\n self.saved_cat = None\n self.embed_text = nn.Sequential(\n nn.Linear(self.clip_dim, self.clip_dim//2),\n nn.GELU(),\n nn.Linear(self.clip_dim//2, self.latent_dim*2),\n nn.GELU(),\n nn.Linear(self.latent_dim*2, self.latent_dim),\n nn.GELU(),\n ).to(self.device)\n\n # Setup embedding layer for categories\n self.embed_cat = nn.Sequential(\n nn.Linear(max_cats, cat_emb),\n nn.GELU(),\n ).to(self.device)\n\n # Setup inference for categorical\n self.predict_cat = nn.Sequential(\n nn.Linear(self.latent_dim, self.latent_dim//2),\n nn.GELU(),\n nn.Linear(self.latent_dim//2, self.latent_dim//4),\n nn.GELU(),\n nn.Linear(self.latent_dim//4, max_cats),\n nn.GELU(),\n nn.Softmax(dim=2),\n ).to(self.device)\n \n # Setup attention layer\n self.attn_layer = MultiheadAttention(embed_dim=self.latent_dim, num_heads=n_head, kdim=cat_emb, vdim=pcd_points*pcd_dim, batch_first=True).to(self.device)\n \n # Setup translation layer\n self.translation_layer = nn.Sequential(\n nn.Linear(self.latent_dim + cat_emb, self.latent_dim),\n nn.GELU(),\n nn.Linear(self.latent_dim, self.translation_params),\n nn.GELU(),\n ).to(self.device)\n self.point_wise_trans_layer = nn.Sequential(\n nn.Linear(self.translation_params + self.xyz_dim, self.xyz_dim),\n nn.GELU(),\n ).to(self.device)\n\n # Setup pointcloud backbone for point cloud extraction\n # Point cloud backbone\n self.pcd_attention = MultiheadAttention(embed_dim=self.translation_params, num_heads=self.translation_params, kdim=self.xyz_dim, vdim=self.xyz_dim, batch_first=True).to(self.device)\n if pcd_backbone_type == \"DGCNN\":\n self.pcd_backbone = DGCNN(emb_dims=clip_dim, output_channels=pcd_points*xyz_dim).to(self.device)\n else:\n self.pcd_backbone = get_backbone(self.pcd_dim).to(self.device)\n\n if human_backbone_type == \"P2R\":\n self.human_backbone = STGCN().to(self.device)\n else:\n self.human_backbone = POSA_Decoder(input_feats=xyz_dim, 
pcd_dim=self.pcd_points).to(self.device)\n # self.pcd_attention = MultiheadAttention(embed_dim=self.latent_dim)\n\n # Setup combination layers for extracted information\n self.upsampling_layer = nn.Sequential(\n nn.Linear(1, 128),\n nn.GELU(),\n nn.Linear(128, 512),\n nn.GELU(),\n nn.Linear(512, self.pcd_points),\n nn.GELU(),\n ).to(self.device)\n\n self.combine_extraction = nn.Sequential(\n # nn.Linear(self.latent_dim*2, self.latent_dim*1.5),\n # nn.GELU(),\n nn.Linear(self.latent_dim*2, self.extract_dim),\n nn.GELU(),\n ).to(self.device)\n\n # Setup U-net-like input and output process\n self.input_process = InputProcess(self.data_rep, self.xyz_dim, self.extract_dim).to(self.device)\n self.output_process = OutputProcess(self.data_rep, self.xyz_dim, self.extract_dim, self.pcd_points).to(self.device)\n \n # Setup save guiding points for visualization purpose\n self.saved_guiding_points = None\n\n def forward(self, x, mask, timesteps, given_objs, given_cats, y=None, force_mask=False):\n \"\"\"\n x: noisy signal - torch.Tensor.shape([bs, seq_len, dims, cat]). E.g, 1, 256, 655, 8\n vertices: torch.Tensor.shape([bs, seq_len, dim, 3])\n mask: torch.Tensor.shape([bs, seq_len])\n timesteps: torch.Tensor.shape([bs,])\n y: modality, e.g., text\n force_mask: mask for point cloud, only use for editing\n \"\"\"\n # Embed features from time\n emb_ts = self.embed_timestep(timesteps)\n emb_ts = emb_ts.permute(1, 0, 2)\n\n # Embed features from modality\n if self.modality == 'text':\n if self.text_encoder_type == \"CLIP\":\n enc_text = self._encode_text_clip(y)\n else:\n enc_text = self._encode_text_bert(y)\n \n # Pass through linear layer of text\n enc_text = self.embed_text(enc_text)\n # enc_text = self.embed_text(self._mask_cond(enc_text, force_mask=force_mask))\n enc_text = enc_text.unsqueeze(1)\n\n # Predict output categorical\n out_cat = self.predict_cat(enc_text.clone().detach())\n self.saved_cat = out_cat\n\n # Embed information from categories\n emb_cat = self.embed_cat(given_cats)\n \n # Combine features from timestep and modality\n emb = torch.cat((emb_ts, enc_text), dim=-1)\n emb = emb.permute(0, 2, 1)\n emb = self.upsampling_layer(emb)\n emb = emb.permute(0, 2, 1)\n\n # Embed point clouds feature\n bs, num_obj, num_points, pcd_dim = given_objs.shape\n\n # Get human pose features\n hm_in = given_objs[:,0].clone().detach()\n given_objs = given_objs.view(bs * num_obj, num_points, pcd_dim)\n hm_out = self.human_backbone(hm_in)\n pcd_out = self.pcd_backbone(given_objs)\n pcd_out = pcd_out.reshape(bs, num_obj, -1)\n\n # Pass through attention layer to attain attention matrix\n attn_mask = mask.unsqueeze(1).clone().detach()\n attn_mask = attn_mask.repeat(self.n_head, 1, 1)\n attn_output, attn_output_weights = self.attn_layer(enc_text, emb_cat, pcd_out, attn_mask=attn_mask)\n \n # Pass through translation layer\n enc_text = enc_text.repeat(1, num_obj, 1)\n emb_cat = torch.cat((emb_cat, enc_text), dim=-1)\n translation_output = self.translation_layer(emb_cat).unsqueeze(-2).repeat(1, 1, self.pcd_points, 1)\n translation_output = translation_output.view(-1, self.pcd_points, self.translation_params)\n\n # Pass through point cloud backbone and retrieve spatial relation\n pcd_out = pcd_out.permute(0, 2, 1)\n pcd_out = pcd_out * attn_output_weights\n pcd_out = pcd_out.reshape(bs, num_obj, num_points, -1)\n pcd_trans = pcd_out.view(-1, self.pcd_points, self.xyz_dim)\n pcd_trans, _ = self.pcd_attention(translation_output, pcd_trans, pcd_trans)\n pcd_trans = pcd_trans.view(bs, num_obj, num_points, -1)\n pcd_out 
= torch.cat((pcd_out, pcd_trans), dim=-1)\n pcd_out = self.point_wise_trans_layer(pcd_out)\n pcd_out = pcd_out.reshape(num_points, -1, bs, num_obj)\n pcd_out = pcd_out * mask\n pcd_out = pcd_out.reshape(bs, num_obj, num_points, -1)\n pcd_out = pcd_out.sum(dim=1)\n pcd_out = (pcd_out + hm_out)/2\n x += pcd_out\n\n # Final embedding features\n # emb = torch.cat((emb, pcd_out), dim=-1)\n emb = self.combine_extraction(emb)\n\n # Reconstruct features\n x = self.input_process(x, emb)\n x = self.output_process(x)\n\n # For guiding points only\n pcd_out = self.input_process(pcd_out, emb)\n pcd_out = self.output_process(pcd_out)\n self.saved_guiding_points = pcd_out\n return out_cat, x\n\n def _set_up_modality(self):\n assert self.modality in ['text', 'audio', None]\n if self.modality == 'text':\n self.embed_text = nn.Sequential(\n nn.Linear(self.clip_dim, self.clip_dim//2),\n nn.GELU(),\n nn.Linear(self.clip_dim//2, self.latent_dim),\n nn.GELU()\n ).to(self.device)\n if self.text_encoder_type == \"CLIP\":\n self.clip_version = self.clip_version\n self.clip_model = self._load_and_freeze_clip(self.clip_version, device=self.device)\n else:\n self._load_and_freeze_bert()\n\n def _mask_cond(self, cond, force_mask=False):\n bs, d = cond.shape\n if force_mask:\n return torch.zeros_like(cond)\n elif self.training and self.cond_mask_prob > 0.:\n mask = torch.bernoulli(torch.ones(bs, device=cond.device) * self.cond_mask_prob).view(bs, 1) # 1-> use null_cond, 0-> use real cond\n return cond * (1. - mask)\n else:\n return cond\n \n def _encode_text_clip(self, raw_text):\n # raw_text - list (batch_size length) of strings with input text prompts\n device = self.device\n max_text_len = 20 # Specific hardcoding for humanml dataset\n if max_text_len is not None:\n default_context_length = 77\n context_length = max_text_len + 2 # start_token + 20 + end_token\n assert context_length < default_context_length\n texts = clip.tokenize(raw_text, context_length=context_length, truncate=True).to(device) # [bs, context_length] # if n_tokens > context_length -> will truncate\n zero_pad = torch.zeros([texts.shape[0], default_context_length-context_length], dtype=texts.dtype, device=texts.device)\n texts = torch.cat([texts, zero_pad], dim=1)\n # print('texts after pad', texts.shape, texts)\n else:\n texts = clip.tokenize(raw_text, truncate=True).to(device) # [bs, context_length] # if n_tokens > 77 -> will truncate\n return self.clip_model.encode_text(texts).float()\n\n def _encode_text_bert(self, raw_text):\n encoded_input = self.tokenizer(list(raw_text), padding=True, return_tensors='pt').to(self.device)\n output = self.text_encoder_model(**encoded_input)\n return output.pooler_output\n\n def _load_and_freeze_clip(self, clip_version, device=None):\n clip_model, clip_preprocess = clip.load(clip_version, device=device,\n jit=False) # Must set jit=False for training\n clip.model.convert_weights(\n clip_model) # Actually this line is unnecessary since clip by default already on float16\n\n # Freeze CLIP weights\n clip_model.eval()\n for p in clip_model.parameters():\n p.requires_grad = False\n\n return clip_model\n \n def _load_and_freeze_bert(self):\n bert_version = \"bert-base-uncased\"\n self.tokenizer = BertTokenizer.from_pretrained(bert_version)\n self.text_encoder_model = BertModel.from_pretrained(bert_version).to(self.device)\n self.text_encoder_model.eval()\n for p in self.text_encoder_model.parameters():\n p.requires_grad = False" }, { "identifier": "gaussian_diffusion", "path": "diffusion/gaussian_diffusion.py", 
"snippet": "def get_named_beta_schedule(schedule_name, num_diffusion_timesteps, scale_betas=1.):\ndef betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):\n def is_vb(self):\n def __init__(\n self,\n *,\n betas,\n model_mean_type,\n model_var_type,\n loss_type,\n rescale_timesteps=False,\n lambda_rcxyz=0.,\n lambda_vel=0.,\n lambda_pose=1.,\n lambda_orient=1.,\n lambda_loc=1.,\n data_rep='rot6d',\n lambda_root_vel=0.,\n lambda_vel_rcxyz=0.,\n lambda_fc=0.,\n lambda_cat=0.05,\n ):\n def masked_l2(self, a, b, mask):\n def q_mean_variance(self, x_start, t):\n def q_sample(self, x_start, t, noise=None):\n def q_posterior_mean_variance(self, x_start, x_t, t):\n def p_mean_variance(\n self, model, x, mask, t, given_objs, given_cats, y, clip_denoised=True, denoised_fn=None, model_kwargs=None\n ):\n def process_xstart(x):\n def _predict_xstart_from_eps(self, x_t, t, eps):\n def _predict_xstart_from_xprev(self, x_t, t, xprev):\n def _predict_eps_from_xstart(self, x_t, t, pred_xstart):\n def _scale_timesteps(self, t):\n def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):\n def condition_mean_with_grad(self, cond_fn, p_mean_var, x, t, model_kwargs=None):\n def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):\n def condition_score_with_grad(self, cond_fn, p_mean_var, x, t, model_kwargs=None):\n def p_sample(\n self,\n model,\n x,\n mask,\n t,\n given_objs, \n given_cats,\n y,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n const_noise=False,\n ):\n def p_sample_with_grad(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n ):\n def p_sample_loop(\n self,\n model,\n shape,\n mask,\n given_objs, \n given_cats,\n y,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n skip_timesteps=0,\n init_image=None,\n randomize_class=False,\n cond_fn_with_grad=False,\n dump_steps=None,\n const_noise=False,\n ):\n def p_sample_loop_progressive(\n self,\n model,\n shape,\n mask,\n given_objs, \n given_cats,\n y,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n skip_timesteps=0,\n init_image=None,\n randomize_class=False,\n cond_fn_with_grad=False,\n const_noise=False,\n ):\n def ddim_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n eta=0.0,\n ):\n def ddim_sample_with_grad(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n eta=0.0,\n ):\n def ddim_reverse_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n model_kwargs=None,\n eta=0.0,\n ):\n def ddim_sample_loop(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n eta=0.0,\n skip_timesteps=0,\n init_image=None,\n randomize_class=False,\n cond_fn_with_grad=False,\n dump_steps=None,\n const_noise=False,\n ):\n def ddim_sample_loop_progressive(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n eta=0.0,\n skip_timesteps=0,\n init_image=None,\n randomize_class=False,\n cond_fn_with_grad=False,\n ):\n def plms_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n 
cond_fn_with_grad=False,\n order=2,\n old_out=None,\n ):\n def get_model_output(x, t):\n def plms_sample_loop(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n skip_timesteps=0,\n init_image=None,\n randomize_class=False,\n cond_fn_with_grad=False,\n order=2,\n ):\n def plms_sample_loop_progressive(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n skip_timesteps=0,\n init_image=None,\n randomize_class=False,\n cond_fn_with_grad=False,\n order=2,\n ):\n def _vb_terms_bpd(\n self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None\n ):\n def training_losses(self, model, cf, mask, t, given_objs, given_cats, target_cat, y=None, noise=None):\n def fc_loss_rot_repr(self, gt_xyz, pred_xyz, mask):\n def to_np_cpu(x):\n def foot_contact_loss_humanml3d(self, target, model_output):\n def velocity_consistency_loss_humanml3d(self, target, model_output):\n def _prior_bpd(self, x_start):\n def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):\ndef _extract_into_tensor(arr, timesteps, broadcast_shape):\nclass ModelMeanType(enum.Enum):\nclass ModelVarType(enum.Enum):\nclass LossType(enum.Enum):\nclass GaussianDiffusion:\n PREVIOUS_X = enum.auto() # the model predicts x_{t-1}\n START_X = enum.auto() # the model predicts x_0\n EPSILON = enum.auto() # the model predicts epsilon\n LEARNED = enum.auto()\n FIXED_SMALL = enum.auto()\n FIXED_LARGE = enum.auto()\n LEARNED_RANGE = enum.auto()\n MSE = enum.auto() # use raw MSE loss (and KL when learning variances)\n RESCALED_MSE = (\n enum.auto()\n ) # use raw MSE loss (with RESCALED_KL when learning variances)\n KL = enum.auto() # use the variational lower-bound\n RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB\n B, C = x.shape[:2]\n B, C = x_t.shape[:2]" }, { "identifier": "SpacedDiffusion", "path": "diffusion/respace.py", "snippet": "class SpacedDiffusion(GaussianDiffusion):\n \"\"\"\n A diffusion process which can skip steps in a base diffusion process.\n\n :param use_timesteps: a collection (sequence or set) of timesteps from the\n original diffusion process to retain.\n :param kwargs: the kwargs to create the base diffusion process.\n \"\"\"\n\n def __init__(self, use_timesteps, **kwargs):\n self.use_timesteps = set(use_timesteps)\n self.timestep_map = []\n self.original_num_steps = len(kwargs[\"betas\"])\n\n base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa\n last_alpha_cumprod = 1.0\n new_betas = []\n for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):\n if i in self.use_timesteps:\n new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)\n last_alpha_cumprod = alpha_cumprod\n self.timestep_map.append(i)\n kwargs[\"betas\"] = np.array(new_betas)\n super().__init__(**kwargs)\n\n def p_mean_variance(\n self, model, *args, **kwargs\n ): # pylint: disable=signature-differs\n return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)\n\n def training_losses(\n self, model, *args, **kwargs\n ): # pylint: disable=signature-differs\n return super().training_losses(self._wrap_model(model), *args, **kwargs)\n\n def condition_mean(self, cond_fn, *args, **kwargs):\n return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)\n\n def condition_score(self, cond_fn, *args, **kwargs):\n return 
super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)\n\n def _wrap_model(self, model):\n if isinstance(model, _WrappedModel):\n return model\n return _WrappedModel(\n model, self.timestep_map, self.rescale_timesteps, self.original_num_steps\n )\n\n def _scale_timesteps(self, t):\n # Scaling is done by the wrapped model.\n return t" }, { "identifier": "space_timesteps", "path": "diffusion/respace.py", "snippet": "def space_timesteps(num_timesteps, section_counts):\n \"\"\"\n Create a list of timesteps to use from an original diffusion process,\n given the number of timesteps we want to take from equally-sized portions\n of the original process.\n\n For example, if there's 300 timesteps and the section counts are [10,15,20]\n then the first 100 timesteps are strided to be 10 timesteps, the second 100\n are strided to be 15 timesteps, and the final 100 are strided to be 20.\n\n If the stride is a string starting with \"ddim\", then the fixed striding\n from the DDIM paper is used, and only one section is allowed.\n\n :param num_timesteps: the number of diffusion steps in the original\n process to divide up.\n :param section_counts: either a list of numbers, or a string containing\n comma-separated numbers, indicating the step count\n per section. As a special case, use \"ddimN\" where N\n is a number of steps to use the striding from the\n DDIM paper.\n :return: a set of diffusion steps from the original process to use.\n \"\"\"\n if isinstance(section_counts, str):\n if section_counts.startswith(\"ddim\"):\n desired_count = int(section_counts[len(\"ddim\") :])\n for i in range(1, num_timesteps):\n if len(range(0, num_timesteps, i)) == desired_count:\n return set(range(0, num_timesteps, i))\n raise ValueError(\n f\"cannot create exactly {num_timesteps} steps with an integer stride\"\n )\n section_counts = [int(x) for x in section_counts.split(\",\")]\n size_per = num_timesteps // len(section_counts)\n extra = num_timesteps % len(section_counts)\n start_idx = 0\n all_steps = []\n for i, section_count in enumerate(section_counts):\n size = size_per + (1 if i < extra else 0)\n if size < section_count:\n raise ValueError(\n f\"cannot divide section of {size} steps into {section_count}\"\n )\n if section_count <= 1:\n frac_stride = 1\n else:\n frac_stride = (size - 1) / (section_count - 1)\n cur_idx = 0.0\n taken_steps = []\n for _ in range(section_count):\n taken_steps.append(start_idx + round(cur_idx))\n cur_idx += frac_stride\n all_steps += taken_steps\n start_idx += size\n return set(all_steps)" }, { "identifier": "fixseed", "path": "util/fixseed.py", "snippet": "def fixseed(seed):\n torch.backends.cudnn.benchmark = False\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)" }, { "identifier": "ClearmlPlatform", "path": "run/train_platforms.py", "snippet": "class ClearmlPlatform(TrainPlatform):\n def __init__(self, save_dir):\n from clearml import Task\n path, name = os.path.split(save_dir)\n self.task = Task.init(project_name='motion_diffusion',\n task_name=name,\n output_uri=path)\n self.logger = self.task.get_logger()\n\n def report_scalar(self, name, value, iteration, group_name):\n self.logger.report_scalar(title=group_name, series=name, iteration=iteration, value=value)\n\n def report_args(self, args, name):\n self.task.connect(args, name=name)\n\n def close(self):\n self.task.close()" }, { "identifier": "TensorboardPlatform", "path": "run/train_platforms.py", "snippet": "class TensorboardPlatform(TrainPlatform):\n def __init__(self, save_dir):\n from 
torch.utils.tensorboard import SummaryWriter\n self.writer = SummaryWriter(log_dir=save_dir)\n\n def report_scalar(self, name, value, iteration, group_name=None):\n self.writer.add_scalar(f'{group_name}/{name}', value, iteration)\n\n def close(self):\n self.writer.close()" }, { "identifier": "NoPlatform", "path": "run/train_platforms.py", "snippet": "class NoPlatform(TrainPlatform):\n def __init__(self, save_dir):\n pass" } ]
from model.sdm import SceneDiffusionModel from diffusion import gaussian_diffusion as gd from diffusion.respace import SpacedDiffusion, space_timesteps from util.fixseed import fixseed from run.train_platforms import ClearmlPlatform, TensorboardPlatform, NoPlatform # required for the eval operation
7,854
diffusion = create_gaussian_diffusion(get_default_diffusion()) return model, diffusion def get_default_model_proxd(): return { 'seq_len': 256, 'modality': 'text', 'clip_version': 'ViT-B/32', 'clip_dim': 512, 'dropout': 0.1, 'n_layer': 6, 'n_head': 8, 'f_vert': 64, 'dim_ff': 512, 'd_hid': 256, 'mesh_ds_dir': "data/mesh_ds", 'posa_path': None, 'latent_dim': 128, 'pcd_dim': 3, 'cond_mask_prob': 1.0, 'device': 0, 'vert_dims': 655, 'obj_cat': 8, 'data_rep': 'rot6d', 'njoints': 251, } def get_default_model_humanise(): return { 'seq_len': 256, 'modality': 'text', 'clip_version': 'ViT-B/32', 'clip_dim': 512, 'dropout': 0.1, 'n_layer': 6, 'n_head': 8, 'f_vert': 64, 'dim_ff': 512, 'd_hid': 256, 'mesh_ds_dir': "data/mesh_ds", 'posa_path': None, 'latent_dim': 128, 'pcd_dim': 3, 'cond_mask_prob': 1.0, 'device': 0, 'vert_dims': 655, 'obj_cat': 8, 'data_rep': 'rot6d', 'njoints': 251, 'max_cats': 11, } def get_default_diffusion(): args = { "lambda_fc": 0.0, "lambda_rcxyz": 0.0, "lambda_vel": 0.0, "lambda_cat": 0.1, "noise_schedule": "cosine", "sigma_small": True, } return args def get_model_args(): return { "arch": "trans_enc", "batch_size": 64, "cond_mask_prob": 0.1, "cuda": True, "data_dir": "", "dataset": "humanml", "device": 0, "diffusion_steps": 1000, "emb_trans_dec": False, "eval_batch_size": 32, "eval_during_training": False, "eval_num_samples": 1000, "eval_rep_times": 3, "eval_split": "test", "lambda_fc": 0.0, "lambda_rcxyz": 0.0, "lambda_vel": 0.0, "lambda_cat": 0.05, "latent_dim": 512, "layers": 8, "log_interval": 1000, "lr": 0.0001, "lr_anneal_steps": 0, "noise_schedule": "cosine", "num_frames": 60, "num_steps": 600000, "overwrite": False, "resume_checkpoint": "", "save_dir": "save/my_humanml_trans_enc_512", "save_interval": 50000, "seed": 10, "sigma_small": True, "train_platform_type": "NoPlatform", "unconstrained": False, "weight_decay": 0.0 } def create_gaussian_diffusion(args): # default params predict_xstart = True # we always predict x_start (a.k.a. x0), that's our deal! steps = 1000 scale_beta = 1. # no scaling timestep_respacing = '' # can be used for ddim sampling, we don't use it. learn_sigma = False rescale_timesteps = False betas = gd.get_named_beta_schedule(args['noise_schedule'], steps, scale_beta) loss_type = gd.LossType.MSE if not timestep_respacing: timestep_respacing = [steps]
def load_model_wo_clip(model, state_dict): missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) def create_model_and_diffusion(datatype): # model = SceneDiffusionModel(**get_model_args(args, data)) if datatype == "proxd": model = SceneDiffusionModel(**get_default_model_proxd()) else: model = SceneDiffusionModel(**get_default_model_humanise()) diffusion = create_gaussian_diffusion(get_default_diffusion()) return model, diffusion def get_default_model_proxd(): return { 'seq_len': 256, 'modality': 'text', 'clip_version': 'ViT-B/32', 'clip_dim': 512, 'dropout': 0.1, 'n_layer': 6, 'n_head': 8, 'f_vert': 64, 'dim_ff': 512, 'd_hid': 256, 'mesh_ds_dir': "data/mesh_ds", 'posa_path': None, 'latent_dim': 128, 'pcd_dim': 3, 'cond_mask_prob': 1.0, 'device': 0, 'vert_dims': 655, 'obj_cat': 8, 'data_rep': 'rot6d', 'njoints': 251, } def get_default_model_humanise(): return { 'seq_len': 256, 'modality': 'text', 'clip_version': 'ViT-B/32', 'clip_dim': 512, 'dropout': 0.1, 'n_layer': 6, 'n_head': 8, 'f_vert': 64, 'dim_ff': 512, 'd_hid': 256, 'mesh_ds_dir': "data/mesh_ds", 'posa_path': None, 'latent_dim': 128, 'pcd_dim': 3, 'cond_mask_prob': 1.0, 'device': 0, 'vert_dims': 655, 'obj_cat': 8, 'data_rep': 'rot6d', 'njoints': 251, 'max_cats': 11, } def get_default_diffusion(): args = { "lambda_fc": 0.0, "lambda_rcxyz": 0.0, "lambda_vel": 0.0, "lambda_cat": 0.1, "noise_schedule": "cosine", "sigma_small": True, } return args def get_model_args(): return { "arch": "trans_enc", "batch_size": 64, "cond_mask_prob": 0.1, "cuda": True, "data_dir": "", "dataset": "humanml", "device": 0, "diffusion_steps": 1000, "emb_trans_dec": False, "eval_batch_size": 32, "eval_during_training": False, "eval_num_samples": 1000, "eval_rep_times": 3, "eval_split": "test", "lambda_fc": 0.0, "lambda_rcxyz": 0.0, "lambda_vel": 0.0, "lambda_cat": 0.05, "latent_dim": 512, "layers": 8, "log_interval": 1000, "lr": 0.0001, "lr_anneal_steps": 0, "noise_schedule": "cosine", "num_frames": 60, "num_steps": 600000, "overwrite": False, "resume_checkpoint": "", "save_dir": "save/my_humanml_trans_enc_512", "save_interval": 50000, "seed": 10, "sigma_small": True, "train_platform_type": "NoPlatform", "unconstrained": False, "weight_decay": 0.0 } def create_gaussian_diffusion(args): # default params predict_xstart = True # we always predict x_start (a.k.a. x0), that's our deal! steps = 1000 scale_beta = 1. # no scaling timestep_respacing = '' # can be used for ddim sampling, we don't use it. learn_sigma = False rescale_timesteps = False betas = gd.get_named_beta_schedule(args['noise_schedule'], steps, scale_beta) loss_type = gd.LossType.MSE if not timestep_respacing: timestep_respacing = [steps]
return SpacedDiffusion(
2
2023-11-06 07:55:51+00:00
12k
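The row above stops at the gold next line `return SpacedDiffusion(`, the point where create_gaussian_diffusion hands its beta schedule over to the respaced diffusion process; the space_timesteps helper reproduced in the row's context is the piece that picks which of the original timesteps survive. The following self-contained sketch condenses that helper's single-section branch to show the striding it produces; it is an illustration of the documented rule, not code from the repository, and the sampling budget chosen for the print is arbitrary.

def evenly_spaced_timesteps(num_timesteps: int, count: int) -> list:
    # Mirrors the single-section branch of space_timesteps: walk the original
    # chain with a fractional stride and keep the rounded indices.
    if count <= 1:
        return [0]
    frac_stride = (num_timesteps - 1) / (count - 1)
    cur, taken = 0.0, []
    for _ in range(count):
        taken.append(round(cur))
        cur += frac_stride
    return taken

# A 1000-step schedule respaced to a 50-step sampling budget keeps indices
# like these; SpacedDiffusion then recomputes new_betas from the retained
# alphas_cumprod so the shortened chain stays a valid diffusion process.
print(evenly_spaced_timesteps(1000, 50)[:5])  # [0, 20, 41, 61, 82]

In the row's cropped_code, timestep_respacing is empty and therefore falls back to [steps], so all 1000 timesteps are kept; passing a string such as "ddim50" instead selects the fixed DDIM striding described in the space_timesteps docstring.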
Harvard-Ophthalmology-AI-Lab/FairSeg
SAMed/segment_anything/automatic_mask_generator.py
[ { "identifier": "Sam", "path": "SAMed/segment_anything/modeling/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n def forward(self, batched_input, multimask_output, image_size):\n if isinstance(batched_input, list):\n outputs = self.forward_test(batched_input, multimask_output)\n else:\n outputs = self.forward_train(batched_input, multimask_output, image_size)\n return outputs\n\n def forward_train(self, batched_input, multimask_output, image_size):\n input_images = self.preprocess(batched_input)\n image_embeddings = self.image_encoder(input_images)\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=None, boxes=None, masks=None\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=image_embeddings,\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=(image_size, image_size),\n original_size=(image_size, image_size)\n )\n outputs = {\n 'masks': masks,\n 'iou_predictions': iou_predictions,\n 'low_res_logits': low_res_masks\n }\n return outputs\n\n @torch.no_grad()\n def forward_test(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. 
Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input promts,\n C is determiend by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack([self.preprocess(x[\"image\"]) for x in batched_input], dim=0)\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. 
Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(masks, original_size, mode=\"bilinear\", align_corners=False)\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x" }, { "identifier": "SamPredictor", "path": "SAMed/segment_anything/predictor.py", "snippet": "class SamPredictor:\n def __init__(\n self,\n sam_model: Sam,\n ) -> None:\n \"\"\"\n Uses SAM to calculate the image embedding for an image, and then\n allow repeated, efficient mask prediction given prompts.\n\n Arguments:\n sam_model (Sam): The model to use for mask prediction.\n \"\"\"\n super().__init__()\n self.model = sam_model\n self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\n self.reset_image()\n\n def set_image(\n self,\n image: np.ndarray,\n image_format: str = \"RGB\",\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method.\n\n Arguments:\n image (np.ndarray): The image for calculating masks. Expects an\n image in HWC uint8 format, with pixel values in [0, 255].\n image_format (str): The color format of the image, in ['RGB', 'BGR'].\n \"\"\"\n assert image_format in [\n \"RGB\",\n \"BGR\",\n ], f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\n if image_format != self.model.image_format:\n image = image[..., ::-1]\n\n # Transform the image to the form expected by the model\n input_image = self.transform.apply_image(image)\n input_image_torch = torch.as_tensor(input_image, device=self.device)\n input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]\n\n self.set_torch_image(input_image_torch, image.shape[:2])\n\n @torch.no_grad()\n def set_torch_image(\n self,\n transformed_image: torch.Tensor,\n original_image_size: Tuple[int, ...],\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method. 
Expects the input\n image to be already transformed to the format expected by the model.\n\n Arguments:\n transformed_image (torch.Tensor): The input image, with shape\n 1x3xHxW, which has been transformed with ResizeLongestSide.\n original_image_size (tuple(int, int)): The size of the image\n before transformation, in (H, W) format.\n \"\"\"\n assert (\n len(transformed_image.shape) == 4\n and transformed_image.shape[1] == 3\n and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size\n ), f\"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.\"\n self.reset_image()\n\n self.original_size = original_image_size\n self.input_size = tuple(transformed_image.shape[-2:])\n input_image = self.model.preprocess(transformed_image)\n self.features = self.model.image_encoder(input_image)\n self.is_image_set = True\n\n def predict(\n self,\n point_coords: Optional[np.ndarray] = None,\n point_labels: Optional[np.ndarray] = None,\n box: Optional[np.ndarray] = None,\n mask_input: Optional[np.ndarray] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n\n Arguments:\n point_coords (np.ndarray or None): A Nx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (np.ndarray or None): A length N array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray or None): A length 4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form 1xHxW, where\n for SAM, H=W=256.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (np.ndarray): The output masks in CxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (np.ndarray): An array of length C containing the model's\n predictions for the quality of each mask.\n (np.ndarray): An array of shape CxHxW, where C is the number\n of masks and H=W=256. These low resolution logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\"An image must be set with .set_image(...) 
before mask prediction.\")\n\n # Transform input prompts\n coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\n if point_coords is not None:\n assert (\n point_labels is not None\n ), \"point_labels must be supplied if point_coords is supplied.\"\n point_coords = self.transform.apply_coords(point_coords, self.original_size)\n coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)\n labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\n if box is not None:\n box = self.transform.apply_boxes(box, self.original_size)\n box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\n box_torch = box_torch[None, :]\n if mask_input is not None:\n mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)\n mask_input_torch = mask_input_torch[None, :, :, :]\n\n masks, iou_predictions, low_res_masks = self.predict_torch(\n coords_torch,\n labels_torch,\n box_torch,\n mask_input_torch,\n multimask_output,\n return_logits=return_logits,\n )\n\n masks = masks[0].detach().cpu().numpy()\n iou_predictions = iou_predictions[0].detach().cpu().numpy()\n low_res_masks = low_res_masks[0].detach().cpu().numpy()\n return masks, iou_predictions, low_res_masks\n\n @torch.no_grad()\n def predict_torch(\n self,\n point_coords: Optional[torch.Tensor],\n point_labels: Optional[torch.Tensor],\n boxes: Optional[torch.Tensor] = None,\n mask_input: Optional[torch.Tensor] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n Input prompts are batched torch tensors and are expected to already be\n transformed to the input frame using ResizeLongestSide.\n\n Arguments:\n point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (torch.Tensor or None): A BxN array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray or None): A Bx4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form Bx1xHxW, where\n for SAM, H=W=256. Masks returned by a previous iteration of the\n predict method do not need further transformation.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (torch.Tensor): The output masks in BxCxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (torch.Tensor): An array of shape BxC containing the model's\n predictions for the quality of each mask.\n (torch.Tensor): An array of shape BxCxHxW, where C is the number\n of masks and H=W=256. 
These low res logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\"An image must be set with .set_image(...) before mask prediction.\")\n\n if point_coords is not None:\n points = (point_coords, point_labels)\n else:\n points = None\n\n # Embed prompts\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=mask_input,\n )\n\n # Predict masks\n low_res_masks, iou_predictions = self.model.mask_decoder(\n image_embeddings=self.features,\n image_pe=self.model.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n\n # Upscale the masks to the original image resolution\n masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)\n\n if not return_logits:\n masks = masks > self.model.mask_threshold\n\n return masks, iou_predictions, low_res_masks\n\n def get_image_embedding(self) -> torch.Tensor:\n \"\"\"\n Returns the image embeddings for the currently set image, with\n shape 1xCxHxW, where C is the embedding dimension and (H,W) are\n the embedding spatial dimension of SAM (typically C=256, H=W=64).\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) to generate an embedding.\"\n )\n assert self.features is not None, \"Features must exist if an image has been set.\"\n return self.features\n\n @property\n def device(self) -> torch.device:\n return self.model.device\n\n def reset_image(self) -> None:\n \"\"\"Resets the currently set image.\"\"\"\n self.is_image_set = False\n self.features = None\n self.orig_h = None\n self.orig_w = None\n self.input_h = None\n self.input_w = None" }, { "identifier": "MaskData", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n for v in kwargs.values():\n assert isinstance(\n v, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats = dict(**kwargs)\n\n def __setitem__(self, key: str, item: Any) -> None:\n assert isinstance(\n item, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats[key] = item\n\n def __delitem__(self, key: str) -> None:\n del self._stats[key]\n\n def __getitem__(self, key: str) -> Any:\n return self._stats[key]\n\n def items(self) -> ItemsView[str, Any]:\n return self._stats.items()\n\n def filter(self, keep: torch.Tensor) -> None:\n for k, v in self._stats.items():\n if v is None:\n self._stats[k] = None\n elif isinstance(v, torch.Tensor):\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n elif isinstance(v, np.ndarray):\n self._stats[k] = v[keep.detach().cpu().numpy()]\n elif isinstance(v, list) and keep.dtype == torch.bool:\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n elif isinstance(v, list):\n self._stats[k] = [v[i] for i in keep]\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def cat(self, new_stats: \"MaskData\") -> None:\n for k, v in new_stats.items():\n if k not in self._stats or self._stats[k] is None:\n self._stats[k] = deepcopy(v)\n elif isinstance(v, torch.Tensor):\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n 
elif isinstance(v, np.ndarray):\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n elif isinstance(v, list):\n self._stats[k] = self._stats[k] + deepcopy(v)\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def to_numpy(self) -> None:\n for k, v in self._stats.items():\n if isinstance(v, torch.Tensor):\n self._stats[k] = v.detach().cpu().numpy()" }, { "identifier": "area_from_rle", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\n return sum(rle[\"counts\"][1::2])" }, { "identifier": "batch_iterator", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n assert len(args) > 0 and all(\n len(a) == len(args[0]) for a in args\n ), \"Batched iteration must have inputs of all the same size.\"\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n for b in range(n_batches):\n yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]" }, { "identifier": "batched_mask_to_box", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n \"\"\"\n # torch.max below raises an error on empty inputs, just skip in this case\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to CxHxW\n shape = masks.shape\n h, w = shape[-2:]\n if len(shape) > 2:\n masks = masks.flatten(0, -3)\n else:\n masks = masks.unsqueeze(0)\n\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + h * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + w * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n if len(shape) > 2:\n out = out.reshape(*shape[:-2], 4)\n else:\n out = out[0]\n\n return out" }, { "identifier": "box_xyxy_to_xywh", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n box_xywh = deepcopy(box_xyxy)\n box_xywh[2] = box_xywh[2] - box_xywh[0]\n box_xywh[3] = box_xywh[3] - box_xywh[1]\n return box_xywh" }, { "identifier": "build_all_layer_point_grids", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def build_all_layer_point_grids(\n n_per_side: int, n_layers: int, scale_per_layer: int\n) -> List[np.ndarray]:\n \"\"\"Generates point grids for all crop layers.\"\"\"\n points_by_layer = []\n for i in range(n_layers + 1):\n n_points = int(n_per_side / (scale_per_layer**i))\n points_by_layer.append(build_point_grid(n_points))\n 
return points_by_layer" }, { "identifier": "calculate_stability_score", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def calculate_stability_score(\n masks: torch.Tensor, mask_threshold: float, threshold_offset: float\n) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecesary cast to torch.int64\n intersections = (\n (masks > (mask_threshold + threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n unions = (\n (masks > (mask_threshold - threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n return intersections / unions" }, { "identifier": "coco_encode_rle", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n from pycocotools import mask as mask_utils # type: ignore\n\n h, w = uncompressed_rle[\"size\"]\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\") # Necessary to serialize with json\n return rle" }, { "identifier": "generate_crop_boxes", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def generate_crop_boxes(\n im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float\n) -> Tuple[List[List[int]], List[int]]:\n \"\"\"\n Generates a list of crop boxes of different sizes. Each layer\n has (2**i)**2 boxes for the ith layer.\n \"\"\"\n crop_boxes, layer_idxs = [], []\n im_h, im_w = im_size\n short_side = min(im_h, im_w)\n\n # Original image\n crop_boxes.append([0, 0, im_w, im_h])\n layer_idxs.append(0)\n\n def crop_len(orig_len, n_crops, overlap):\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n for i_layer in range(n_layers):\n n_crops_per_side = 2 ** (i_layer + 1)\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n # Crops in XYWH format\n for x0, y0 in product(crop_box_x0, crop_box_y0):\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n crop_boxes.append(box)\n layer_idxs.append(i_layer + 1)\n\n return crop_boxes, layer_idxs" }, { "identifier": "is_box_near_crop_edge", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def is_box_near_crop_edge(\n boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0\n) -> torch.Tensor:\n \"\"\"Filter masks at the edge of a crop, but not at the edge of the original image.\"\"\"\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n return torch.any(near_crop_edge, dim=1)" }, { "identifier": "mask_to_rle_pytorch", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def mask_to_rle_pytorch(tensor: 
torch.Tensor) -> List[Dict[str, Any]]:\n \"\"\"\n Encodes masks to an uncompressed RLE, in the format expected by\n pycoco tools.\n \"\"\"\n # Put in fortran order and flatten h,w\n b, h, w = tensor.shape\n tensor = tensor.permute(0, 2, 1).flatten(1)\n\n # Compute change indices\n diff = tensor[:, 1:] ^ tensor[:, :-1]\n change_indices = diff.nonzero()\n\n # Encode run length\n out = []\n for i in range(b):\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n cur_idxs = torch.cat(\n [\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n cur_idxs + 1,\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),\n ]\n )\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n counts = [] if tensor[i, 0] == 0 else [0]\n counts.extend(btw_idxs.detach().cpu().tolist())\n out.append({\"size\": [h, w], \"counts\": counts})\n return out" }, { "identifier": "remove_small_regions", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def remove_small_regions(\n mask: np.ndarray, area_thresh: float, mode: str\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Removes small disconnected regions and holes in a mask. Returns the\n mask and an indicator of if the mask has been modified.\n \"\"\"\n import cv2 # type: ignore\n\n assert mode in [\"holes\", \"islands\"]\n correct_holes = mode == \"holes\"\n working_mask = (correct_holes ^ mask).astype(np.uint8)\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n sizes = stats[:, -1][1:] # Row 0 is background label\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n if len(small_regions) == 0:\n return mask, False\n fill_labels = [0] + small_regions\n if not correct_holes:\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n # If every region is below threshold, keep largest\n if len(fill_labels) == 0:\n fill_labels = [int(np.argmax(sizes)) + 1]\n mask = np.isin(regions, fill_labels)\n return mask, True" }, { "identifier": "rle_to_mask", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n h, w = rle[\"size\"]\n mask = np.empty(h * w, dtype=bool)\n idx = 0\n parity = False\n for count in rle[\"counts\"]:\n mask[idx : idx + count] = parity\n idx += count\n parity ^= True\n mask = mask.reshape(w, h)\n return mask.transpose() # Put in C order" }, { "identifier": "uncrop_boxes_xyxy", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n # Check if boxes has a channel dimension\n if len(boxes.shape) == 3:\n offset = offset.unsqueeze(1)\n return boxes + offset" }, { "identifier": "uncrop_masks", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def uncrop_masks(\n masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int\n) -> torch.Tensor:\n x0, y0, x1, y1 = crop_box\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n return masks\n # Coordinate transform masks\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n pad = (x0, pad_x - x0, y0, pad_y - y0)\n return torch.nn.functional.pad(masks, pad, value=0)" }, { "identifier": "uncrop_points", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0]], 
device=points.device)\n # Check if points has a channel dimension\n if len(points.shape) == 3:\n offset = offset.unsqueeze(1)\n return points + offset" } ]
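The amg helpers quoted in this context list encode masks into an uncompressed RLE and back. A minimal round-trip sketch, assuming the module is importable as segment_anything.utils.amg (the package prefix in the SAMed fork may differ):

import numpy as np
import torch

from segment_anything.utils.amg import area_from_rle, mask_to_rle_pytorch, rle_to_mask  # assumed import path

# One 4x4 mask containing a 2x2 foreground square.
mask = torch.zeros((1, 4, 4), dtype=torch.bool)
mask[0, 1:3, 1:3] = True

rles = mask_to_rle_pytorch(mask)            # one uncompressed RLE dict per mask
print(rles[0]["size"], rles[0]["counts"])   # expected: [4, 4] and [5, 2, 2, 2, 5]

recovered = rle_to_mask(rles[0])            # back to an HxW boolean numpy array
assert np.array_equal(recovered, mask[0].numpy())
print(area_from_rle(rles[0]))               # expected: 4 foreground pixels

The counts alternate background and foreground runs in column-major order, which is why area_from_rle sums every second entry.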
import numpy as np
import torch
import cv2  # type: ignore # noqa: F401
from torchvision.ops.boxes import batched_nms, box_area  # type: ignore
from typing import Any, Dict, List, Optional, Tuple

from .modeling import Sam
from .predictor import SamPredictor
from .utils.amg import (
    MaskData,
    area_from_rle,
    batch_iterator,
    batched_mask_to_box,
    box_xyxy_to_xywh,
    build_all_layer_point_grids,
    calculate_stability_score,
    coco_encode_rle,
    generate_crop_boxes,
    is_box_near_crop_edge,
    mask_to_rle_pytorch,
    remove_small_regions,
    rle_to_mask,
    uncrop_boxes_xyxy,
    uncrop_masks,
    uncrop_points,
)
from pycocotools import mask as mask_utils  # type: ignore # noqa: F401
10,171
Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crops_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crops_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." if output_mode == "coco_rle": if min_mask_region_area > 0: self.predictor = SamPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. 
Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. """ # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] elif self.output_mode == "binary_mask":
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class SamAutomaticMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = "binary_mask", ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crops_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crops_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." 
if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." if output_mode == "coco_rle": if min_mask_region_area > 0: self.predictor = SamPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. """ # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] elif self.output_mode == "binary_mask":
mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]]
14
2023-11-03 17:05:40+00:00
12k
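Since this record documents both the generator's constructor arguments and the dictionary keys returned by generate(), a short end-to-end sketch may help. The import path, checkpoint file, and image path below are assumptions rather than values from the source:

import cv2
from segment_anything import SamAutomaticMaskGenerator, sam_model_registry  # assumed import path

sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")  # placeholder weights file
generator = SamAutomaticMaskGenerator(
    sam,
    points_per_side=32,           # 32x32 grid of point prompts
    pred_iou_thresh=0.88,         # drop masks the model itself rates poorly
    stability_score_thresh=0.95,  # drop masks unstable under threshold shifts
    min_mask_region_area=100,     # remove small regions and holes (needs opencv)
    output_mode="binary_mask",
)

image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)  # HWC uint8
masks = generator.generate(image)

# Each record carries the keys listed in the generate() docstring above.
best = max(masks, key=lambda m: m["predicted_iou"])
print(len(masks), best["bbox"], best["area"], best["stability_score"])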
microsoft/PLEX
scripts/exps_on_MW.py
[ { "identifier": "pretrain_EX", "path": "PLEX/pretraining_EX.py", "snippet": "def pretrain_EX(cmdline_args):\n os.environ[\"NCCL_DEBUG\"] = \"INFO\"\n print(\"=== Pretraining the Execuctor ===\")\n parser = argparse.ArgumentParser()\n\n # Add all relevant command-line arguments\n add_common_args(parser)\n add_common_pretraining_args(parser)\n parser.add_argument('--noncontextual_pretrain_tasks', type=str, default=None)\n\n # Parse them and validate them\n args = parser.parse_args(cmdline_args)\n args = vars(args)\n assert args['best_metric'] != 'evaluation/success_rate', 'Currently, evaluation/success_rate is not a valid metric for pretraining. Use evaluation/neg_val_error instead.'\n\n # These parameters are needed only for evaluating the model. Since at the current stage we are pretraining just the EX\n # (inverse dynamics) part of PLEX, the values of the parameters other than bc_learning_mode don't matter, since at the\n # end of this stage the model won't yet know how to handle goal contexts.\n args['bc_learning_mode'] = True\n args['context_style'] = 'blank'\n args['context_from_same_traj'] = False\n args['reward_type'] = 'native'\n args['normalize_reward'] = False\n args['discount'] = 0\n\n # If we are pretraining a PLEX model, for loss computation we should use *just* the inverse dynamics predictions\n # computed based on obs. in the training trajectories (not predictions of the obs., and not predictions of inv.d. based\n # on predicted obs. -- both of these need context to be provided, and we want inv.d. to be context-independent).\n #\n # NOTE: The arguments below aren't actual command-line arguments. We are just addeing them to args[] out of convenience.\n if args['model'] == 'PLEX':\n args['grounded_inverse_dynamics_loss_weight'] = 1\n args['predicted_inverse_dynamics_loss_weight'] = 0\n args['future_prediction_loss_weight'] = 0\n\n log, log_to_wandb, timer, data_shuffling_rng, device, camera_names, modalities_to_mask, data_dir, common_env_metadata_dict = setup_essentials(args)\n # NOTE: common_env_metadata_dict may be modified by the calls to load_data below.\n\n # Load data: context-agnostic dynamics data and validation trajectories (if any)\n noncontextual_pretrain_tasks, noncontextual_pretrain_max_trajs = parse_tasks(args['noncontextual_pretrain_tasks'], args['robot'], args['max_pretrain_trajectories'])\n print(f'*** The validation tasks are: {args[\"validation_tasks\"]} ***')\n validation_tasks, validation_max_trajs = parse_tasks(args['validation_tasks'], args['robot'], args['max_validation_trajectories'])\n\n all_pretrain_trajectories = []\n\n # First, load validation data, if any\n if validation_tasks:\n print(\"Reading validation tasks...\")\n data = load_data(log,\n data_dir,\n validation_tasks,\n # NOTE: the parameter that controls this is max_validation_trajectories, *NOT* max_pretrain_trajectories.\n max_trajectories=validation_max_trajs,\n camera_names=camera_names,\n image_size=args['image_size'],\n target_frame_rate=args['target_frame_rate'],\n # This doesn't matter for evaluation of pretrained executor.\n normalize_rewards=False,\n # This doesn't matter for evaluation of pretrained executor.\n reward_type='sparse',\n common_env_metadata_dict=common_env_metadata_dict,\n data_shuffling_rng=data_shuffling_rng)\n val_train_data, val_val_data = {}, {}\n for k, v in data.items():\n print(f'Splitting the data of validation task {k}...')\n train_trajectories, val_trajectories = train_val_split(v, args['validation_frac'])\n val_train_data[k] = 
TrajectoryDataset(train_trajectories, camera_names, True)\n val_val_data[k] = TrajectoryDataset(val_trajectories, camera_names, True)\n print(f'Stored {len(val_train_data[k].trajectories)} training and {len(val_val_data[k].trajectories)} validation trajectories for task {k}...')\n\n \"\"\"\n If we don't have a finetuning stage for evaluating the pretrained model, use the training trajectories\n of the validation tasks for pretraining the model. These tasks' validation trajectories will still be used\n for computing the pretrained model's validation loss.\n \"\"\"\n if args['num_steps_per_ft_eval_iter'] <= 0 and args['validation_frac'] < 1.0:\n print(f\"NOTE: since we aren't doing finetuning for evaluation at pretraining time (num_steps_per_ft_eval_iter = {args['num_steps_per_ft_eval_iter']}), we'll use some of the trajectories from validation task {k} during pretraining. These trajectries are *not* in the validation split.\")\n all_pretrain_trajectories.extend(train_trajectories)\n del data\n\n # Then, load context-agnostic dynamics data\n print(\"Reading context-agnostic dynamics data...\")\n data = load_data(log,\n data_dir,\n noncontextual_pretrain_tasks,\n video_only=False,\n max_trajectories=noncontextual_pretrain_max_trajs,\n camera_names=camera_names,\n image_size=args['image_size'],\n target_frame_rate=args['target_frame_rate'],\n # This doesn't matter for evaluation of pretrained executor.\n normalize_rewards=False,\n # This doesn't matter for evaluation of pretrained executor.\n reward_type='sparse',\n common_env_metadata_dict=common_env_metadata_dict,\n data_shuffling_rng=data_shuffling_rng)\n for k, v in data.items():\n log(f'{len(v)} trajectories for task {k}')\n all_pretrain_trajectories.extend(v)\n\n noncontextual_pretrain_data = TrajectoryDataset(all_pretrain_trajectories, camera_names, False)\n del data\n\n # Instantiate a model\n model, trainable_param_spec = setup_model(args,\n noncontextual_pretrain_tasks[0],\n log,\n device,\n camera_names,\n modalities_to_mask,\n data_dir,\n bc_mode=False)\n\n # Prepare the model for training\n trainable_params = set_trainable_params(model, trainable_param_spec, log)\n\n # Instantiate a batch sampler over the training data we loaded above\n batch_sampler = setup_batch_sampler(noncontextual_pretrain_data, None, args, device)\n\n # NOTE: We should reconsider how finetuning-based evaluator works should it be allowed to modify only exactly\n # same set of parameters that training modifies (trainable_params) or a different one (e.g., just the head)?\n #\n # Either way, in the most common evaluation case, i.e., when this evaluator just runs the model against\n # the validation tasks' data without actually doing finetuning (args['num_steps_per_ft_eval_iter'] = 0),\n # this method works correctly now.\n eval_fns = [get_finetuning_based_evaluator(val_train_data, val_val_data, trainable_params, args, device)]\n\n # Instantiate a trainer\n trainer = setup_trainer(batch_sampler,\n args['pretrain_learning_rate'],\n eval_fns,\n model,\n trainable_params,\n args)\n\n\n if log_to_wandb:\n group_name = f'{args[\"robot\"]}_pretrain'\n setup_wandb_logging(group_name, args)\n\n # Run training\n model_name_prefix = ('pretr_' + args['model'] + '__' if args['model'] != 'PLEX' else 'pretr_EX__')\n metric_values = run_training(trainer, model, args['pretrain_steps_per_iter'], model_name_prefix, args, log, log_to_wandb, timer)\n return metric_values" }, { "identifier": "pretrain_PL", "path": "PLEX/pretraining_PL.py", "snippet": "def pretrain_PL(cmdline_args):\n 
os.environ[\"NCCL_DEBUG\"] = \"INFO\"\n print(\"=== Pretraining the Planner ===\")\n parser = argparse.ArgumentParser()\n\n # Add all relevant command-line arguments\n add_common_args(parser)\n add_common_pretraining_args(parser)\n add_conditioning_args(parser)\n parser.add_argument('--video_tasks', type=str, default=None)\n\n # Parse them and validate them\n args = parser.parse_args(cmdline_args)\n args = vars(args)\n if not args['bc_learning_mode']:\n assert 'reward' not in args['modalities_to_mask'], \"If the model is expected to condition on returns, then they should not be masked out.\"\n assert args['best_metric'] != 'evaluation/success_rate', 'Currently, evaluation/success_rate is not a valid metric for pretraining. Use evaluation/neg_val_error instead.'\n\n # If we are pretraining a PLEX model, for loss computation we should use *just* the obs. embedding predictions,\n # not predictions of inverse dynamics.\n #\n # NOTE: The arguments below aren't actual command-line arguments. We are just addeing them to args[] out of convenience.\n if args['model'] == 'PLEX':\n args['grounded_inverse_dynamics_loss_weight'] = 0\n args['predicted_inverse_dynamics_loss_weight'] = 0\n args['future_prediction_loss_weight'] = 1\n\n log, log_to_wandb, timer, data_shuffling_rng, device, camera_names, modalities_to_mask, data_dir, common_env_metadata_dict = setup_essentials(args)\n # NOTE: common_env_metadata_dict may be modified by the calls to load_data below.\n\n # Load data: videos and validation trajectories (if any)\n video_tasks, video_max_trajs = parse_tasks(args['video_tasks'])\n print(f'*** The validation tasks are: {args[\"validation_tasks\"]} ***')\n validation_tasks, validation_max_trajs = parse_tasks(args['validation_tasks'], args['robot'], args['max_validation_trajectories'])\n\n all_pretrain_trajectories = []\n # First, load validation data, if any\n if validation_tasks:\n print(\"Reading validation tasks...\")\n data = load_data(log,\n data_dir,\n validation_tasks,\n # NOTE: the parameter that controls this is max_validation_trajectories, *NOT* max_pretrain_trajectories.\n max_trajectories=validation_max_trajs,\n camera_names=camera_names,\n image_size=args['image_size'],\n target_frame_rate=args['target_frame_rate'],\n normalize_rewards=args['normalize_reward'],\n reward_type=args['reward_type'],\n common_env_metadata_dict=common_env_metadata_dict,\n data_shuffling_rng=data_shuffling_rng)\n val_train_data, val_val_data = {}, {}\n for k, v in data.items():\n print(f'Splitting the data of validation task {k}...')\n train_trajectories, val_trajectories = train_val_split(v, args['validation_frac'])\n val_train_data[k] = TrajectoryDataset(train_trajectories, camera_names, True)\n val_val_data[k] = TrajectoryDataset(val_trajectories, camera_names, True)\n print(f'Stored {len(val_train_data[k].trajectories)} training and {len(val_val_data[k].trajectories)} validation trajectories for task {k}...')\n\n \"\"\"\n If we don't have a finetuning stage for evaluating the pretrained model, use the training trajectories\n of the validation tasks for pretraining the model. These tasks' validation trajectories will still be used\n for computing the pretrained model's validation loss.\n \"\"\"\n if args['num_steps_per_ft_eval_iter'] <= 0 and args['validation_frac'] < 1.0:\n print(f\"NOTE: since we aren't doing finetuning for evaluation at pretraining time (num_steps_per_ft_eval_iter = {args['num_steps_per_ft_eval_iter']}), we'll use some of the trajectories from validation task {k} during pretraining. 
These trajectries are *not* in the validation split.\")\n all_pretrain_trajectories.extend(train_trajectories)\n del data\n\n # Then, load video-only data\n print(\"Reading video-only data...\")\n data = load_data(log,\n data_dir,\n video_tasks,\n video_only=True,\n max_trajectories=video_max_trajs,\n camera_names=camera_names,\n image_size=args['image_size'],\n target_frame_rate=args['target_frame_rate'],\n normalize_rewards=args['normalize_reward'],\n reward_type=args['reward_type'],\n common_env_metadata_dict=common_env_metadata_dict,\n data_shuffling_rng=data_shuffling_rng)\n for k, v in data.items():\n log(f'{len(v)} videos for task {k}')\n all_pretrain_trajectories.extend(v)\n\n video_data = TrajectoryDataset(all_pretrain_trajectories, camera_names, True)\n del data\n\n # Instantiate a model\n model, trainable_param_spec = setup_model(args,\n video_tasks[0],\n log,\n device,\n camera_names,\n modalities_to_mask,\n data_dir,\n args['bc_learning_mode'])\n\n # Prepare the model for training\n trainable_params = set_trainable_params(model, trainable_param_spec, log)\n\n # Instantiate a batch sampler over the training data we loaded above\n batch_sampler = setup_batch_sampler(video_data, args['context_style'], args, device)\n\n # NOTE: We should reconsider how finetuning-based evaluator works should it be allowed to modify only exactly\n # same set of parameters that training modifies (trainable_params) or a different one (e.g., just the head)?\n #\n # Either way, in the most common evaluation case, i.e., when this evaluator just runs the model against\n # the validation tasks' data without actually doing finetuning (args['num_steps_per_ft_eval_iter'] = 0),\n # this method works correctly now.\n eval_fns = [get_finetuning_based_evaluator(val_train_data, val_val_data, trainable_params, args, device)]\n\n # Instantiate a trainer\n trainer = setup_trainer(batch_sampler,\n args['pretrain_learning_rate'],\n eval_fns,\n model,\n trainable_params,\n args)\n\n if log_to_wandb:\n group_name = f'{args[\"robot\"]}_pretrain'\n setup_wandb_logging(group_name, args)\n\n # Run training\n model_name_prefix = ('pretr_' + args['model'] + '__' if args['model'] != 'PLEX' else 'pretr_PLEX__')\n metric_values = run_training(trainer, model, args['pretrain_steps_per_iter'], model_name_prefix, args, log, log_to_wandb, timer)\n return metric_values" }, { "identifier": "finetune", "path": "PLEX/finetuning.py", "snippet": "def finetune(cmdline_args):\n os.environ[\"NCCL_DEBUG\"] = \"INFO\"\n print(\"=== Finetuning ===\")\n parser = argparse.ArgumentParser()\n # Add all relevant command-line arguments\n add_common_args(parser)\n add_conditioning_args(parser)\n parser.add_argument('--finetune_learning_rate', type=float, default=1e-5)\n parser.add_argument('--finetune_steps_per_iter', type=int, default=100)\n parser.add_argument('--target_task', type=str, default=None)\n parser.add_argument('--max_target_trajectories', type=int, default=None)\n\n # Parse them and validate them\n args = parser.parse_args(cmdline_args)\n args = vars(args)\n if not args['bc_learning_mode']:\n assert 'reward' not in args['modalities_to_mask'], \"If the model is expected to condition on returns, then they should not be masked out.\"\n\n # NOTE: The arguments below aren't actual command-line arguments. 
We are just addeing them to args[] out of convenience.\n # Note also that during finetuning we set predicted_inverse_dynamics_loss_weight=1, i.e., **in case the\n # finetuning trajectories contain actions**, we adapt PLEX's based on the predicted observation latents\n # from it planner PL rather than based on the actual (\"grounded\") observation latents contained\n # in finetuning trajectories.\n if args['model'] == 'PLEX':\n args['grounded_inverse_dynamics_loss_weight'] = 0\n args['predicted_inverse_dynamics_loss_weight'] = 1\n args['future_prediction_loss_weight'] = 1\n\n log, log_to_wandb, timer, data_shuffling_rng, device, camera_names, modalities_to_mask, data_dir, common_env_metadata_dict = setup_essentials(args)\n # NOTE: common_env_metadata_dict may be modified by the calls to load_data below.\n\n # Load data: target-task trajectories\n target_tasks, target_max_trajs = parse_tasks(args['target_task'], args['robot'], args['max_target_trajectories'])\n target_task = target_tasks[0]\n\n data = load_data(log,\n data_dir,\n target_tasks,\n max_trajectories=target_max_trajs,\n discount=args['discount'],\n camera_names=camera_names,\n image_size=args['image_size'],\n target_frame_rate=args['target_frame_rate'],\n normalize_rewards=args['normalize_reward'],\n reward_type=args['reward_type'],\n common_env_metadata_dict=common_env_metadata_dict,\n data_shuffling_rng=data_shuffling_rng)\n\n assert len(data.keys()) == 1, f\"There should be only one target task. Discovered {len(data.keys())}: {data.keys()}\"\n #assert args['validation_tasks'] is None, f\"Validation tasks other than the target tasks aren't used during finetuning and were likely specified erroneously: {args['validation_tasks']}.\"\n\n # Train/test split\n # NOTE: we don't actually need create the split if args['best_metric'] == 'evaluation/success_rate'\n if args['best_metric'] == 'evaluation/success_rate':\n print(\"WARNING: since the evaluation metric is success rate, the training-validation split of the target task data will be ignored, and all target-task trajectories will be used for training.\")\n train_trajectories, val_trajectories = train_val_split(data[target_task.name], args['validation_frac'])\n target_all_data = TrajectoryDataset(data[target_task.name], camera_names, contextual=True)\n print(f\"Total target trajectories: {len(target_all_data)}\")\n target_train_data = TrajectoryDataset(train_trajectories, camera_names, contextual=True)\n target_val_data = TrajectoryDataset(val_trajectories, camera_names, contextual=True)\n del train_trajectories\n del val_trajectories\n log(f'{len(target_train_data.trajectories)} train and {len(target_val_data.trajectories)} validation trajectories')\n\n # Instantiate a model\n model, trainable_param_spec = setup_model(args,\n target_task,\n log,\n device,\n camera_names,\n modalities_to_mask,\n data_dir,\n args['bc_learning_mode'])\n\n # If the number of training iterations is 0, we are being asked to just evaluate the model\n if args['max_iters'] == 0:\n print(\"--------------- RUNNING IN EVALUATION MODE ----------------\")\n # We are in the evaluation mode\n # Note that for evaluation, we are using *all* the demonstration data for the task, not just the validation data.\n # This is because get_success_rate_evaluator will use the demo trajectories only for sampling the goals/contexts.\n # We allow using the same contexts during both training and evaluation.\n evaluator = get_success_rate_evaluator(target_task, target_all_data, common_env_metadata_dict, args, log.dir)\n 
dummy_iter_num = 0\n outputs = evaluator(model, dummy_iter_num)\n\n logs = dict()\n for k, v in outputs.items():\n logs[f'evaluation/{k}'] = [v]\n\n for k, v in logs.items():\n print(f'{k}: {v[0]}')\n\n print(\"--------------- FINISHED EVALUATION ----------------\")\n return logs\n\n # Otherwise, prepare the model for training\n trainable_params = set_trainable_params(model, trainable_param_spec, log)\n\n # Instantiate a batch sampler over the training data we loaded above\n if args['best_metric'] == 'evaluation/neg_val_error':\n batch_sampler = setup_batch_sampler(target_train_data, args['context_style'], args, device)\n else:\n # Recall from above that if the metric is success rate, we use all target task data for training,\n # without allocating any of this data for validation.\n batch_sampler = setup_batch_sampler(target_all_data, args['context_style'], args, device)\n\n # Setup a model evaluator\n eval_fn_dict = {'evaluation/neg_val_error': get_validation_error_evaluator(target_val_data, args, device),\n 'evaluation/success_rate': get_success_rate_evaluator(target_task, target_all_data, common_env_metadata_dict, args, log.dir)}\n eval_fns = [eval_fn_dict[args['best_metric']]]\n\n # Instantiate a trainer\n trainer = setup_trainer(batch_sampler,\n args['finetune_learning_rate'],\n eval_fns,\n model,\n trainable_params,\n args)\n\n if log_to_wandb:\n group_name = f'{args[\"robot\"]}_target-{target_task.name}'\n setup_wandb_logging(group_name, args)\n\n # Run training\n model_name_prefix = 'finet_' + args['model'] + target_task.name + '__'\n metric_values = run_training(trainer, model, args['finetune_steps_per_iter'], model_name_prefix, args, log, log_to_wandb, timer)\n return metric_values" } ]
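Read side by side, the three stage functions above differ mainly in which loss terms they switch on for a PLEX model. A small summary, restated directly from the snippets rather than from any PLEX API:

# Loss-weight settings each stage injects into args[] when model == 'PLEX', as
# (grounded_inverse_dynamics, predicted_inverse_dynamics, future_prediction).
PLEX_STAGE_LOSS_WEIGHTS = {
    "pretrain_EX": (1, 0, 0),  # train inverse dynamics on grounded observation latents only
    "pretrain_PL": (0, 0, 1),  # train future observation-latent prediction only
    "finetune":    (0, 1, 1),  # adapt using predicted latents plus future prediction
}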
from PLEX.pretraining_EX import pretrain_EX
from PLEX.pretraining_PL import pretrain_PL
from PLEX.finetuning import finetune
import argparse
import random
7,451
parser.add_argument("-w", "--num_workers", type=int, default=0, help = "Number of worker for running the evaluation episodes. NOTE: applicable only if the training stage is 'ft' (finetuning).") args = parser.parse_args() common_flags = ['--relative_position_encodings', '--bc_learning_mode'] common_args = { 'seed': str(random.randint(0, 1000000)), 'data_dir': args.data_dir, 'log_dir': args.log_dir, 'robot': 'Sawyer', 'camera_names': 'corner', 'modalities_to_mask': 'proprio,action', 'record_camera': 'corner', 'image_size': '84', 'reward_type': 'sparse', 'image_encoder_arch': 'resnet18', 'impute_style': 'trainable', 'embed_dim': '256', 'future_step': '1', 'activation_function': 'relu', 'device': 'cuda', 'dropout': '0.2', 'weight_decay': '1e-05', 'warmup_steps': '200', 'batch_size': '256', 'action_output_type': 'deterministic', 'model': 'PLEX', 'obs_pred.n_layer': '3', 'obs_pred.n_head': '4', 'obs_pred.K': '30', 'inv_d_pred.n_layer': '3', 'inv_d_pred.n_head': '4', 'inv_d_pred.K': '30' } common_pretraining_flags = ['--no_video'] common_pretraining_args = { 'pretrain_learning_rate': '0.0005', 'pretrain_steps_per_iter': '250', 'num_steps_per_ft_eval_iter': '0', 'best_metric': 'evaluation/neg_val_error', 'validation_frac': '1.0', 'validation_samples': '30', # Validation tasks can be any MW tasks -- we don't use validation error to stop training. # We use the target tasks as validation tasks. 'validation_tasks': 'metaworld/hand-insert-v2/--TARGET_ROBOT--/noise0/,metaworld/door-lock-v2/--TARGET_ROBOT--/noise0/,metaworld/door-unlock-v2/--TARGET_ROBOT--/noise0/,metaworld/box-close-v2/--TARGET_ROBOT--/noise0/,metaworld/bin-picking-v2/--TARGET_ROBOT--/noise0/', } cmdline_args = common_flags for k in common_args: cmdline_args.append('--' + k) cmdline_args.append(common_args[k]) if args.training_stage == 'ex': cmdline_args.extend(common_pretraining_flags) for k in common_pretraining_args: cmdline_args.append('--' + k) cmdline_args.append(common_pretraining_args[k]) cmdline_args.extend([ '--max_iters', '10', # To pretrain the executor, use 75 play trajectories per task. '--max_pretrain_trajectories', '75', # During executor pretraining, we adapt both the executor's and the encoder's weights but keep the planner frozen. '--image_encoder_tune_style', 'all', '--obs_pred.transformer_tune_style', 'none', '--inv_d_pred.transformer_tune_style', 'all', # Use the dynamics data from Meta-World ML50's 5 downstream environments. '--noncontextual_pretrain_tasks', 'metaworld/hand-insert-v2/--TARGET_ROBOT--/noise0.5/,metaworld/door-lock-v2/--TARGET_ROBOT--/noise0.5/,metaworld/door-unlock-v2/--TARGET_ROBOT--/noise0.5/,metaworld/box-close-v2/--TARGET_ROBOT--/noise0.5/,metaworld/bin-picking-v2/--TARGET_ROBOT--/noise0.5/', ]) pretrain_EX(cmdline_args) elif args.training_stage == 'pl': cmdline_args.extend(common_pretraining_flags) for k in common_pretraining_args: cmdline_args.append('--' + k) cmdline_args.append(common_pretraining_args[k]) cmdline_args.extend([ '--max_iters', '10', # To pretrain the planner, use all (100) available video demonstrations per task. '--max_pretrain_trajectories', 100, '--context_style', 'first-success', '--context_from_diff_traj', # During planner pretraining, we want to keep the encoder and the executor's weights frozen, adapting only the weights of the planner itself. '--image_encoder_tune_style', 'none', '--obs_pred.transformer_tune_style', 'all', '--inv_d_pred.transformer_tune_style', 'none', # For pretraining, use video demonstrations from Meta-World ML50's 45 pretraining tasks. 
'--video_tasks', 'metaworld/pick-out-of-hole-v2/Sawyer/noise0/,metaworld/door-open-v2/Sawyer/noise0/,metaworld/pick-place-wall-v2/Sawyer/noise0/,metaworld/assembly-v2/Sawyer/noise0/,metaworld/faucet-close-v2/Sawyer/noise0/,metaworld/coffee-pull-v2/Sawyer/noise0/,metaworld/plate-slide-back-side-v2/Sawyer/noise0/,metaworld/dial-turn-v2/Sawyer/noise0/,metaworld/stick-push-v2/Sawyer/noise0/,metaworld/sweep-into-v2/Sawyer/noise0/,metaworld/handle-pull-side-v2/Sawyer/noise0/,metaworld/drawer-open-v2/Sawyer/noise0/,metaworld/window-open-v2/Sawyer/noise0/,metaworld/button-press-v2/Sawyer/noise0/,metaworld/assembly-v2/Sawyer/noise0/,metaworld/faucet-close-v2/Sawyer/noise0/,metaworld/coffee-pull-v2/Sawyer/noise0/,metaworld/plate-slide-back-side-v2/Sawyer/noise0/,metaworld/dial-turn-v2/Sawyer/noise0/,metaworld/stick-push-v2/Sawyer/noise0/,metaworld/sweep-into-v2/Sawyer/noise0/,metaworld/handle-pull-side-v2/Sawyer/noise0/,metaworld/shelf-place-v2/Sawyer/noise0/,metaworld/basketball-v2/Sawyer/noise0/,metaworld/button-press-topdown-v2/Sawyer/noise0/,metaworld/button-press-topdown-wall-v2/Sawyer/noise0/,metaworld/button-press-wall-v2/Sawyer/noise0/,metaworld/coffee-button-v2/Sawyer/noise0/,metaworld/coffee-push-v2/Sawyer/noise0/,metaworld/disassemble-v2/Sawyer/noise0/,metaworld/door-close-v2/Sawyer/noise0/,metaworld/drawer-close-v2/Sawyer/noise0/,metaworld/faucet-open-v2/Sawyer/noise0/,metaworld/hammer-v2/Sawyer/noise0/,metaworld/handle-press-side-v2/Sawyer/noise0/,metaworld/handle-press-v2/Sawyer/noise0/,metaworld/handle-pull-v2/Sawyer/noise0/,metaworld/lever-pull-v2/Sawyer/noise0/,metaworld/peg-insert-side-v2/Sawyer/noise0/,metaworld/reach-v2/Sawyer/noise0/,metaworld/push-back-v2/Sawyer/noise0/,metaworld/push-v2/Sawyer/noise0/,metaworld/pick-place-v2/Sawyer/noise0/,metaworld/plate-slide-v2/Sawyer/noise0/,metaworld/plate-slide-side-v2/Sawyer/noise0/,metaworld/plate-slide-back-v2/Sawyer/noise0/,metaworld/peg-unplug-side-v2/Sawyer/noise0/,metaworld/soccer-v2/Sawyer/noise0/,metaworld/stick-pull-v2/Sawyer/noise0/,metaworld/push-wall-v2/Sawyer/noise0/,metaworld/reach-wall-v2/Sawyer/noise0/,metaworld/sweep-v2/Sawyer/noise0/,metaworld/window-close-v2/Sawyer/noise0/', '--load_path', args.model_file ]) pretrain_PL(cmdline_args) elif args.training_stage == 'ft': cmdline_args.extend([ '--max_iters', '10', # Use just 10 trajectories of the target task for finetunning, randomly sampled from the set of all (100) of that task's training trajectories. '--max_target_trajectories', '10', '--target_task', args.target_task, '--context_style', 'first-success', '--context_from_diff_traj', # During finetuning we adapt just the last transformer layer of the planner, keeping the planner's other layers as well as the encoder and the executor frozen. # We could adapt other parts of PLEX too, but it's unnecessary to reproduce the PLEX paper's results. '--image_encoder_tune_style', 'none', '--obs_pred.transformer_tune_style', 'last_block', '--inv_d_pred.transformer_tune_style', 'none', '--finetune_learning_rate', '0.0005', '--finetune_steps_per_iter', '100', '--best_metric', 'evaluation/success_rate', '--max_eval_episode_len', '500', '--num_eval_episodes', '50', '--num_eval_workers', str(args.num_workers), # Remove this flag if you don't want videos of evaluation trajectories to be recorded. '--record_video', '--load_path', args.model_file ])
if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("-s", "--training_stage", type=str, default='ft', help = "The training stage. Can be 'ex' (pretaining the EXecutor), 'pl' (pretraining the PLanner), or 'ft' (finetuning a pretrained PLEX)") parser.add_argument("-d", "--data_dir", type=str, default='store/data', help = "Directory path where the training data is.") parser.add_argument("-l", "--log_dir", type=str, default='store/logs', help = "Directory path where to output logs and model checkpoints.") parser.add_argument("-m", "--model_file", type=str, default=None, help = "Model file path.") parser.add_argument("-t", "--target_task", type=str, default=None, help = "Directory path where the target task's data is. NOTE: applicable only if the training stage is 'ft' (finetuning).") parser.add_argument("-w", "--num_workers", type=int, default=0, help = "Number of worker for running the evaluation episodes. NOTE: applicable only if the training stage is 'ft' (finetuning).") args = parser.parse_args() common_flags = ['--relative_position_encodings', '--bc_learning_mode'] common_args = { 'seed': str(random.randint(0, 1000000)), 'data_dir': args.data_dir, 'log_dir': args.log_dir, 'robot': 'Sawyer', 'camera_names': 'corner', 'modalities_to_mask': 'proprio,action', 'record_camera': 'corner', 'image_size': '84', 'reward_type': 'sparse', 'image_encoder_arch': 'resnet18', 'impute_style': 'trainable', 'embed_dim': '256', 'future_step': '1', 'activation_function': 'relu', 'device': 'cuda', 'dropout': '0.2', 'weight_decay': '1e-05', 'warmup_steps': '200', 'batch_size': '256', 'action_output_type': 'deterministic', 'model': 'PLEX', 'obs_pred.n_layer': '3', 'obs_pred.n_head': '4', 'obs_pred.K': '30', 'inv_d_pred.n_layer': '3', 'inv_d_pred.n_head': '4', 'inv_d_pred.K': '30' } common_pretraining_flags = ['--no_video'] common_pretraining_args = { 'pretrain_learning_rate': '0.0005', 'pretrain_steps_per_iter': '250', 'num_steps_per_ft_eval_iter': '0', 'best_metric': 'evaluation/neg_val_error', 'validation_frac': '1.0', 'validation_samples': '30', # Validation tasks can be any MW tasks -- we don't use validation error to stop training. # We use the target tasks as validation tasks. 'validation_tasks': 'metaworld/hand-insert-v2/--TARGET_ROBOT--/noise0/,metaworld/door-lock-v2/--TARGET_ROBOT--/noise0/,metaworld/door-unlock-v2/--TARGET_ROBOT--/noise0/,metaworld/box-close-v2/--TARGET_ROBOT--/noise0/,metaworld/bin-picking-v2/--TARGET_ROBOT--/noise0/', } cmdline_args = common_flags for k in common_args: cmdline_args.append('--' + k) cmdline_args.append(common_args[k]) if args.training_stage == 'ex': cmdline_args.extend(common_pretraining_flags) for k in common_pretraining_args: cmdline_args.append('--' + k) cmdline_args.append(common_pretraining_args[k]) cmdline_args.extend([ '--max_iters', '10', # To pretrain the executor, use 75 play trajectories per task. '--max_pretrain_trajectories', '75', # During executor pretraining, we adapt both the executor's and the encoder's weights but keep the planner frozen. '--image_encoder_tune_style', 'all', '--obs_pred.transformer_tune_style', 'none', '--inv_d_pred.transformer_tune_style', 'all', # Use the dynamics data from Meta-World ML50's 5 downstream environments. 
'--noncontextual_pretrain_tasks', 'metaworld/hand-insert-v2/--TARGET_ROBOT--/noise0.5/,metaworld/door-lock-v2/--TARGET_ROBOT--/noise0.5/,metaworld/door-unlock-v2/--TARGET_ROBOT--/noise0.5/,metaworld/box-close-v2/--TARGET_ROBOT--/noise0.5/,metaworld/bin-picking-v2/--TARGET_ROBOT--/noise0.5/', ]) pretrain_EX(cmdline_args) elif args.training_stage == 'pl': cmdline_args.extend(common_pretraining_flags) for k in common_pretraining_args: cmdline_args.append('--' + k) cmdline_args.append(common_pretraining_args[k]) cmdline_args.extend([ '--max_iters', '10', # To pretrain the planner, use all (100) available video demonstrations per task. '--max_pretrain_trajectories', 100, '--context_style', 'first-success', '--context_from_diff_traj', # During planner pretraining, we want to keep the encoder and the executor's weights frozen, adapting only the weights of the planner itself. '--image_encoder_tune_style', 'none', '--obs_pred.transformer_tune_style', 'all', '--inv_d_pred.transformer_tune_style', 'none', # For pretraining, use video demonstrations from Meta-World ML50's 45 pretraining tasks. '--video_tasks', 'metaworld/pick-out-of-hole-v2/Sawyer/noise0/,metaworld/door-open-v2/Sawyer/noise0/,metaworld/pick-place-wall-v2/Sawyer/noise0/,metaworld/assembly-v2/Sawyer/noise0/,metaworld/faucet-close-v2/Sawyer/noise0/,metaworld/coffee-pull-v2/Sawyer/noise0/,metaworld/plate-slide-back-side-v2/Sawyer/noise0/,metaworld/dial-turn-v2/Sawyer/noise0/,metaworld/stick-push-v2/Sawyer/noise0/,metaworld/sweep-into-v2/Sawyer/noise0/,metaworld/handle-pull-side-v2/Sawyer/noise0/,metaworld/drawer-open-v2/Sawyer/noise0/,metaworld/window-open-v2/Sawyer/noise0/,metaworld/button-press-v2/Sawyer/noise0/,metaworld/assembly-v2/Sawyer/noise0/,metaworld/faucet-close-v2/Sawyer/noise0/,metaworld/coffee-pull-v2/Sawyer/noise0/,metaworld/plate-slide-back-side-v2/Sawyer/noise0/,metaworld/dial-turn-v2/Sawyer/noise0/,metaworld/stick-push-v2/Sawyer/noise0/,metaworld/sweep-into-v2/Sawyer/noise0/,metaworld/handle-pull-side-v2/Sawyer/noise0/,metaworld/shelf-place-v2/Sawyer/noise0/,metaworld/basketball-v2/Sawyer/noise0/,metaworld/button-press-topdown-v2/Sawyer/noise0/,metaworld/button-press-topdown-wall-v2/Sawyer/noise0/,metaworld/button-press-wall-v2/Sawyer/noise0/,metaworld/coffee-button-v2/Sawyer/noise0/,metaworld/coffee-push-v2/Sawyer/noise0/,metaworld/disassemble-v2/Sawyer/noise0/,metaworld/door-close-v2/Sawyer/noise0/,metaworld/drawer-close-v2/Sawyer/noise0/,metaworld/faucet-open-v2/Sawyer/noise0/,metaworld/hammer-v2/Sawyer/noise0/,metaworld/handle-press-side-v2/Sawyer/noise0/,metaworld/handle-press-v2/Sawyer/noise0/,metaworld/handle-pull-v2/Sawyer/noise0/,metaworld/lever-pull-v2/Sawyer/noise0/,metaworld/peg-insert-side-v2/Sawyer/noise0/,metaworld/reach-v2/Sawyer/noise0/,metaworld/push-back-v2/Sawyer/noise0/,metaworld/push-v2/Sawyer/noise0/,metaworld/pick-place-v2/Sawyer/noise0/,metaworld/plate-slide-v2/Sawyer/noise0/,metaworld/plate-slide-side-v2/Sawyer/noise0/,metaworld/plate-slide-back-v2/Sawyer/noise0/,metaworld/peg-unplug-side-v2/Sawyer/noise0/,metaworld/soccer-v2/Sawyer/noise0/,metaworld/stick-pull-v2/Sawyer/noise0/,metaworld/push-wall-v2/Sawyer/noise0/,metaworld/reach-wall-v2/Sawyer/noise0/,metaworld/sweep-v2/Sawyer/noise0/,metaworld/window-close-v2/Sawyer/noise0/', '--load_path', args.model_file ]) pretrain_PL(cmdline_args) elif args.training_stage == 'ft': cmdline_args.extend([ '--max_iters', '10', # Use just 10 trajectories of the target task for finetunning, randomly sampled from the set of all (100) of that task's training 
trajectories. '--max_target_trajectories', '10', '--target_task', args.target_task, '--context_style', 'first-success', '--context_from_diff_traj', # During finetuning we adapt just the last transformer layer of the planner, keeping the planner's other layers as well as the encoder and the executor frozen. # We could adapt other parts of PLEX too, but it's unnecessary to reproduce the PLEX paper's results. '--image_encoder_tune_style', 'none', '--obs_pred.transformer_tune_style', 'last_block', '--inv_d_pred.transformer_tune_style', 'none', '--finetune_learning_rate', '0.0005', '--finetune_steps_per_iter', '100', '--best_metric', 'evaluation/success_rate', '--max_eval_episode_len', '500', '--num_eval_episodes', '50', '--num_eval_workers', str(args.num_workers), # Remove this flag if you don't want videos of evaluation trajectories to be recorded. '--record_video', '--load_path', args.model_file ])
finetune(cmdline_args)
2
2023-11-06 09:38:09+00:00
12k
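The script in this record dispatches on -s to run one training stage at a time. A hedged sketch of driving all three stages in sequence, where the checkpoint file names and the target-task path are placeholders rather than names used by the repository:

import subprocess

DATA_DIR, LOG_DIR = "store/data", "store/logs"

def run_stage(*extra):
    # Flags -s/-d/-l/-m/-t/-w mirror the argparse options defined in exps_on_MW.py.
    subprocess.run(
        ["python", "scripts/exps_on_MW.py", "-d", DATA_DIR, "-l", LOG_DIR, *extra],
        check=True,
    )

# 1) Pretrain the executor (inverse dynamics) on noisy Meta-World play data.
run_stage("-s", "ex")

# 2) Pretrain the planner on video-only demos, loading the stage-1 model (placeholder file name).
run_stage("-s", "pl", "-m", f"{LOG_DIR}/pretr_EX__checkpoint.pt")

# 3) Finetune the planner's last block on 10 demos of one target task (placeholder task path).
run_stage(
    "-s", "ft",
    "-m", f"{LOG_DIR}/pretr_PLEX__checkpoint.pt",
    "-t", "metaworld/hand-insert-v2/Sawyer/noise0/",
    "-w", "4",
)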
mitre/arlin
tests/test_samdp.py
[ { "identifier": "COLORS", "path": "arlin/analysis/visualization/colors.py", "snippet": "COLORS = [\n base[\"b\"],\n tableau[\"tab:orange\"],\n base[\"g\"],\n base[\"r\"],\n base[\"c\"],\n base[\"m\"],\n base[\"y\"],\n base[\"k\"],\n tableau[\"tab:blue\"],\n tableau[\"tab:green\"],\n tableau[\"tab:red\"],\n tableau[\"tab:purple\"],\n tableau[\"tab:brown\"],\n tableau[\"tab:pink\"],\n tableau[\"tab:gray\"],\n css4[\"brown\"],\n css4[\"salmon\"],\n css4[\"chocolate\"],\n css4[\"burlywood\"],\n css4[\"darkgoldenrod\"],\n css4[\"gold\"],\n css4[\"khaki\"],\n css4[\"yellow\"],\n css4[\"darkolivegreen\"],\n css4[\"chartreuse\"],\n css4[\"lime\"],\n css4[\"turquoise\"],\n css4[\"darkslategray\"],\n css4[\"cadetblue\"],\n css4[\"powderblue\"],\n css4[\"steelblue\"],\n css4[\"dodgerblue\"],\n css4[\"royalblue\"],\n css4[\"navy\"],\n css4[\"mediumblue\"],\n css4[\"slateblue\"],\n css4[\"blueviolet\"],\n css4[\"mediumorchid\"],\n css4[\"darkmagenta\"],\n css4[\"magenta\"],\n css4[\"deeppink\"],\n css4[\"palevioletred\"],\n]" }, { "identifier": "SAMDP", "path": "arlin/samdp.py", "snippet": "class SAMDP:\n \"\"\"Class for an SAMDP of an RL policy.\"\"\"\n\n def __init__(self, clusters: np.ndarray, dataset: XRLDataset):\n \"\"\"Intialize an SAMDP object.\n\n Args:\n clusters (np.ndarray): Generated cluster data.\n dataset (XRLDataset): XRLDataset from an RL policy.\n \"\"\"\n self.clusters = clusters\n self.dataset = dataset\n\n self.samdp = self._generate()\n self.graph = self._generate_graph()\n\n def _generate(self) -> np.ndarray:\n \"\"\"Generate an SAMDP.\n\n Returns:\n np.ndarray: Numpy array representation of the SAMDP.\n \"\"\"\n logging.info(\"Generating SAMDP.\")\n self.num_actions = len(np.unique(self.dataset.actions))\n self.num_clusters = len(np.unique(self.clusters))\n\n samdp_counts = np.zeros([self.num_clusters, self.num_actions, self.num_clusters])\n\n for i in range(len(self.clusters) - 1):\n terminated = self.dataset.terminateds[i]\n\n if not terminated:\n cur_cluster = self.clusters[i]\n action = self.dataset.actions[i]\n next_cluster = self.clusters[i + 1]\n\n if not cur_cluster == next_cluster:\n samdp_counts[cur_cluster, action, next_cluster] += 1\n\n np.set_printoptions(suppress=True)\n np.seterr(divide=\"ignore\", invalid=\"ignore\")\n\n # Get the total number of out_edges for each cluster by action\n out_edges_per_action = np.sum(samdp_counts, axis=-1)\n\n # Add total row\n total_out_edges = np.expand_dims(np.sum(out_edges_per_action, 1), axis=-1)\n out_edges_per_action = np.append(out_edges_per_action, total_out_edges, axis=-1)\n\n out_edges_per_action = np.expand_dims(out_edges_per_action, axis=-1)\n # Expand total out_edges count to match size of samdp_counts\n expanded_out_edges = np.repeat(out_edges_per_action, self.num_clusters, axis=-1)\n\n # Get total out_edges to each cluster (not grouped by action)\n total_by_cluster = np.expand_dims(np.sum(samdp_counts, axis=1), 1)\n\n # Add the total counts row to the samdp_counts\n samdp_counts = np.concatenate([samdp_counts, total_by_cluster], axis=1)\n self.samdp_counts = samdp_counts\n\n samdp = samdp_counts / expanded_out_edges\n samdp = np.nan_to_num(samdp, nan=0)\n\n return samdp[:, :, :]\n\n def save_txt(self, save_dir: str) -> None:\n \"\"\"Create a text table representation of the SAMDP.\n\n Args:\n save_dir (str): Dir to save the text SAMDP to.\n \"\"\"\n samdp_data = [\"SAMDP\"]\n for from_cluster_id in range(self.num_clusters):\n table = PrettyTable()\n table.title = f\"Cluster {from_cluster_id}\"\n\n headers = 
[f\"Cluster {i}\" for i in range(self.num_clusters)]\n table.field_names = [\"Action Value\"] + headers\n for action in range(self.num_actions + 1):\n if action < self.num_actions:\n row = [f\"Action {action}\"]\n else:\n row = [\"Total\"]\n\n for to_cluster_id in range(self.num_clusters):\n value = self.samdp_counts[from_cluster_id, action, to_cluster_id]\n percent = self.samdp[from_cluster_id, action, to_cluster_id]\n row.append(f\"{value} | {round(percent*100, 2)}%\")\n table.add_row(row)\n\n samdp_data.append(str(table))\n\n samdp_data = \"\\n\".join(samdp_data)\n\n os.makedirs(save_dir, exist_ok=True)\n with open(os.path.join(save_dir, \"samdp.txt\"), \"w\") as f:\n f.write(samdp_data)\n\n def _generate_graph(self) -> nx.Graph:\n \"\"\"Create a graph of this dataset's SAMDP using NetworkX.\n\n Each node represents a cluster from self.dataset[\"clusters\"] and the edges\n represent the paths the agent takes in the dataset between clusters. An edge is\n added for each action taken that brings the agent from one cluster to another.\n For each action from a cluster, only the edge with the highest probability is\n shown, meaning there are other clusters that action can move the agent to but\n only the highest probability edge is shown.\n\n Returns:\n nx.Graph: NetworkX Graph representation of the SAMDP\n \"\"\"\n\n logging.info(\"Generating SAMDP Graph.\")\n\n G = nx.MultiDiGraph()\n\n G.add_nodes_from([f\"Cluster \\n{i}\" for i in range(self.num_clusters)])\n\n for from_cluster_id in range(self.num_clusters):\n from_cluster = f\"Cluster \\n{from_cluster_id}\"\n for action_id in range(self.num_actions):\n for to_cluster_id in range(self.num_clusters):\n to_cluster = f\"Cluster \\n{to_cluster_id}\"\n\n prob = self.samdp[from_cluster_id, action_id, to_cluster_id]\n\n if not prob == 0 and not from_cluster_id == to_cluster_id:\n G.add_edge(\n from_cluster,\n to_cluster,\n weight=prob,\n action=action_id,\n color=COLORS[action_id],\n )\n\n self.graph = G\n self._set_node_attributes(self.graph)\n return self.graph\n\n def _generate_simplified_graph(self) -> nx.Graph:\n \"\"\"Generate a simplified version of the SAMDP.\n\n In this graph, specific actions are not shown in the connections between nodes.\n Instead, a black line shows the connections between nodes.\n\n Returns:\n nx.Graph: Simplified version of the SAMDP with less informative connections.\n \"\"\"\n G = nx.MultiDiGraph()\n G.add_nodes_from([f\"Cluster \\n{i}\" for i in range(self.num_clusters)])\n\n for from_cluster_id in range(self.num_clusters):\n from_cluster = f\"Cluster \\n{from_cluster_id}\"\n for to_cluster_id in range(self.num_clusters):\n to_cluster = f\"Cluster \\n{to_cluster_id}\"\n\n prob = np.sum(self.samdp[from_cluster_id, -1, to_cluster_id])\n if not prob == 0 and not from_cluster_id == to_cluster_id:\n G.add_edge(\n from_cluster,\n to_cluster,\n weight=prob,\n action=-1,\n color=\"#000000\",\n )\n\n self._set_node_attributes(G)\n return G\n\n def _set_node_attributes(self, graph: nx.Graph):\n \"\"\"Set the attributes of each node in the graph.\n\n Args:\n graph (nx.Graph): Graph object\n \"\"\"\n self._set_node_colors(graph)\n self._set_node_edges(graph)\n\n def _set_node_colors(self, graph: nx.Graph):\n \"\"\"Set the colors of each node in the graph.\n\n Args:\n graph (nx.Graph): Graph object\n \"\"\"\n node_colors = {}\n for i in range(self.num_clusters):\n node_colors[f\"Cluster \\n{i}\"] = COLORS[i]\n nx.set_node_attributes(graph, node_colors, \"color\")\n\n def _set_node_edges(self, graph: nx.Graph):\n \"\"\"Set 
the colors for the edges of each node in the graph.\n\n Initial nodes have a green border, intermediate have a black border, and terminal\n have a red border.\n\n Args:\n graph (nx.Graph): Graph object\n \"\"\"\n start_clusters = set(self.clusters[self.dataset.start_indices])\n term_clusters = set(self.clusters[self.dataset.term_indices])\n\n node_edges = {}\n for node_id in range(self.num_clusters):\n node_name = f\"Cluster \\n{node_id}\"\n if node_id in start_clusters:\n node_edges[node_name] = \"g\"\n elif node_id in term_clusters:\n node_edges[node_name] = \"r\"\n else:\n node_edges[node_name] = \"k\"\n\n nx.set_node_attributes(graph, node_edges, \"edge_color\")\n\n def _generate_bfs_pos(self) -> Dict[nx.Graph.node, Tuple[int, int]]:\n \"\"\"Generate the positioning for each node in the graph by breadth first search.\n\n Initial nodes are on the left, termainl nodes on the right, and intermediate\n nodes in between.\n\n Returns:\n Dict[Node, Tuple[int, int]]: Positions for each node in the graph.\n \"\"\"\n pos = {}\n start_clusters = set(self.clusters[self.dataset.start_indices])\n term_clusters = set(self.clusters[self.dataset.term_indices])\n initial_nodes = [f\"Cluster \\n{i}\" for i in start_clusters]\n terminal_nodes = [f\"Cluster \\n{i}\" for i in term_clusters]\n\n bfs_layers = list(nx.bfs_layers(self.graph, initial_nodes))\n\n layers = []\n for i, layer_list in enumerate(bfs_layers):\n layers.append([])\n for j, node in enumerate(layer_list):\n if node in terminal_nodes:\n pass\n pos[node] = (i, j)\n layers[i].append(node)\n\n depth = len(bfs_layers)\n for e, node in enumerate(terminal_nodes):\n pos[node] = (depth, e)\n\n return pos\n\n def _generate_edge_arcs(self, pos, edges: List) -> List[float]:\n \"\"\"Generate the arcs for the connections between nodes.\n\n Connections have arcs if they overlap with other connections or go through\n nodes.\n\n Args:\n pos (Dict[Node, Tuple[int, int]]): Positions of each node in the graph.\n edges (List): List of edges\n\n Returns:\n List[float]: List of edge arcs.\n \"\"\"\n edge_arcs = []\n for edge in edges:\n from_node_x, from_node_y = pos[edge[0]]\n to_node_x, to_node_y = pos[edge[1]]\n\n reverse_edge = self.graph.has_edge(edge[1], edge[0])\n\n arc = edge[2]\n if (from_node_x == to_node_x or from_node_y == to_node_y) or reverse_edge:\n arc += 1\n\n edge_arcs.append(0.05 * arc)\n\n return edge_arcs\n\n def save_complete_graph(self, file_path: str) -> nx.Graph:\n \"\"\"Save the complete SAMDP as a matplotlib graph.\n\n Args:\n file_path (str): Path to save the graph image to.\n\n Returns:\n nx.Graph: Complete SAMDP graph\n \"\"\"\n _ = plt.figure(figsize=(30, 15))\n plt.title(\"Complete SAMDP\")\n\n pos = self._generate_bfs_pos()\n edge_arcs = self._generate_edge_arcs(pos, self.graph.edges(keys=True))\n\n colors = [node[1][\"color\"] for node in self.graph.nodes(data=True)]\n node_edges = [node[1][\"edge_color\"] for node in self.graph.nodes(data=True)]\n\n nx.draw_networkx_nodes(\n self.graph,\n pos,\n node_size=3500,\n node_color=colors,\n edgecolors=node_edges,\n linewidths=5,\n )\n\n nx.draw_networkx_labels(self.graph, pos, font_color=\"whitesmoke\")\n\n for i, edge in enumerate(self.graph.edges(data=True)):\n nx.draw_networkx_edges(\n self.graph,\n pos,\n edgelist=[edge],\n connectionstyle=f\"arc3,rad={edge_arcs[i]}\",\n edge_color=edge[2][\"color\"],\n alpha=max(0, min(edge[2][\"weight\"] + 0.1, 1)),\n node_size=3500,\n arrowsize=25,\n )\n\n handles = [Patch(color=COLORS[i]) for i in range(self.num_actions)]\n labels = [f\"Action 
{i}\" for i in range(self.num_actions)]\n leg_title = \"Actions\"\n legend = {\"handles\": handles, \"labels\": labels, \"title\": leg_title}\n legend.update(\n {\"bbox_to_anchor\": (1.0, 1.0), \"loc\": \"upper left\", \"fontsize\": \"xx-large\"}\n )\n plt.legend(**legend)\n\n plt.tight_layout()\n logging.info(f\"Saving complete SAMDP graph png to {file_path}...\")\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n plt.savefig(file_path, format=\"PNG\")\n plt.close()\n\n return self.graph\n\n def save_simplified_graph(self, file_path: str) -> nx.Graph:\n \"\"\"Save a simplified version of the SAMDP graph.\n\n Edges do not include information about the action taken.\n\n Args:\n file_path (str): Path to save the SAMDP graph to.\n\n Returns:\n nx.Graph: Simplified SAMDP graph\n \"\"\"\n _ = plt.figure(figsize=(20, 10))\n plt.title(\"Simplified SAMDP\")\n\n G = self._generate_simplified_graph()\n pos = self._generate_bfs_pos()\n\n colors = [node[1][\"color\"] for node in self.graph.nodes(data=True)]\n node_edges = [node[1][\"edge_color\"] for node in self.graph.nodes(data=True)]\n\n nx.draw_networkx_nodes(\n G,\n pos,\n node_size=3500,\n node_color=colors,\n edgecolors=node_edges,\n linewidths=5,\n )\n\n nx.draw_networkx_labels(G, pos, font_color=\"whitesmoke\")\n\n edges = G.edges(data=True, keys=True)\n edge_arcs = self._generate_edge_arcs(pos, edges)\n\n for i, edge in enumerate(edges):\n nx.draw_networkx_edges(\n self.graph,\n pos,\n edgelist=[edge],\n connectionstyle=f\"arc3,rad={edge_arcs[i]}\",\n edge_color=edge[3][\"color\"],\n alpha=max(0, min(edge[3][\"weight\"] + 0.1, 1)),\n node_size=3500,\n arrowsize=25,\n )\n\n plt.tight_layout()\n logging.info(f\"Saving simplified SAMDP graph png to {file_path}...\")\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n plt.savefig(file_path, format=\"PNG\")\n plt.close()\n\n return G\n\n def save_likely_graph(self, file_path: str) -> nx.Graph:\n \"\"\"Save a graph where only the most likely edges are shown.\n\n Args:\n file_path (str): Path to save graph image to.\n\n Returns:\n nx.Graph: Graph object with only most likely edges\n \"\"\"\n _ = plt.figure(figsize=(30, 15))\n plt.title(\"Most Probable SAMDP\")\n\n pos = self._generate_bfs_pos()\n\n colors = [node[1][\"color\"] for node in self.graph.nodes(data=True)]\n node_edges = [node[1][\"edge_color\"] for node in self.graph.nodes(data=True)]\n\n nx.draw_networkx_nodes(\n self.graph,\n pos,\n node_size=3500,\n node_color=colors,\n edgecolors=node_edges,\n linewidths=5,\n )\n\n nx.draw_networkx_labels(self.graph, pos, font_color=\"whitesmoke\")\n\n edges = []\n for node in self.graph.nodes():\n out_edges = self.graph.out_edges(node, data=True, keys=True)\n\n for action in range(self.num_actions):\n action_edges = [i for i in out_edges if i[3][\"action\"] == action]\n if not action_edges == []:\n best_edge = sorted(\n action_edges, key=lambda x: (x[3][\"weight\"], x[2]), reverse=True\n )[0]\n edges.append(best_edge)\n\n edge_arcs = self._generate_edge_arcs(pos, edges)\n\n for i, edge in enumerate(edges):\n nx.draw_networkx_edges(\n self.graph,\n pos,\n edgelist=[edge],\n connectionstyle=f\"arc3,rad={edge_arcs[i]}\",\n edge_color=edge[3][\"color\"],\n alpha=max(0, min(edge[3][\"weight\"] + 0.1, 1)),\n node_size=3500,\n arrowsize=25,\n )\n\n handles = [Patch(color=COLORS[i]) for i in range(self.num_actions)]\n labels = [f\"Action {i}\" for i in range(self.num_actions)]\n leg_title = \"Actions\"\n legend = {\"handles\": handles, \"labels\": labels, \"title\": leg_title}\n 
legend.update(\n {\"bbox_to_anchor\": (1.0, 1.0), \"loc\": \"upper left\", \"fontsize\": \"xx-large\"}\n )\n plt.legend(**legend)\n\n plt.tight_layout()\n logging.info(f\"Saving most probable SAMDP graph png to {file_path}...\")\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n plt.savefig(file_path, format=\"PNG\")\n plt.close()\n\n graph = nx.MultiDiGraph()\n graph.add_nodes_from(self.graph.nodes(data=True))\n graph.add_edges_from(edges)\n\n return graph\n\n def _find_best_path(\n self,\n from_cluster: str,\n to_cluster: str,\n paths: List[List[Tuple[str, str, int, Dict[str, Any]]]],\n ) -> Tuple[Dict[int, float], List]:\n \"\"\"Calculate the probability of each path being taken.\n\n Args:\n from_cluster (str): Cluster to move from\n to_cluster (str): Cluster to move to\n paths (List[List[Tuple[str, str, int, Dict[str, Any]]]]): All simple paths\n from one cluster to another.\n\n Returns:\n Dict[int, float], List: Dictionary with actions as keys and highest\n probability to reach target from current node, List of edges that make up the\n most probably path between clusters\n \"\"\"\n if len(paths) == 0:\n logging.info(f\"\\tNo paths found from {from_cluster} to {to_cluster}.\")\n return []\n\n probs = {}\n best_paths = {}\n for path in paths:\n prob = 1\n action = path[0][3][\"action\"]\n for e, edge in enumerate(path):\n if e == 0:\n edge_prob = edge[3][\"weight\"]\n else:\n from_node = int(edge[0].split(\" \")[-1])\n to_node = int(edge[1].split(\" \")[-1])\n edge_prob = np.sum(self.samdp[from_node, -1, to_node])\n prob = prob * edge_prob\n\n if action in probs:\n if prob > probs[action]:\n probs[action] = prob\n best_paths[action] = path\n else:\n probs[action] = prob\n best_paths[action] = path\n\n logging.info(\n f\"Highest probability of getting from {from_cluster} to {to_cluster}:\"\n )\n for action in probs:\n logging.info(f\"\\tvia Action {action}: {round(probs[action] * 100, 2)}%\")\n for i, edge in enumerate(best_paths[action]):\n if i == 0:\n weight = round(edge[3][\"weight\"] * 100, 2)\n else:\n from_id = int(edge[0].split(\" \")[-1])\n to_id = int(edge[1].split(\" \")[-1])\n weight = round(self.samdp[from_id, -1, to_id] * 100, 2)\n logging.info(f\"\\t\\t{edge[0]} to {edge[1]} with {weight}%\")\n\n best_action = max(probs, key=probs.get)\n logging.info(\n f\"\\tBest Option: Action {best_action} with \"\n + f\"{round(probs[best_action] * 100, 2)}%\"\n )\n logging.info(\"\\tBest Path:\")\n for i, edge in enumerate(best_paths[best_action]):\n if i == 0:\n weight = round(edge[3][\"weight\"] * 100, 2)\n else:\n from_id = int(edge[0].split(\" \")[-1])\n to_id = int(edge[1].split(\" \")[-1])\n weight = round(self.samdp[from_id, -1, to_id] * 100, 2)\n logging.info(f\"\\t\\t{edge[0]} to {edge[1]} with {weight}%\")\n\n return best_paths[best_action]\n\n def save_paths(\n self,\n from_cluster_id: int,\n to_cluster_id: int,\n file_path: str,\n best_path_only: bool = False,\n verbose=False,\n ):\n \"\"\"Save all paths from one cluster to another.\n\n Args:\n from_cluster_id (int): Cluster to move from\n to_cluster_id (int): Cluster to move to\n file_path (str): Path to save image to\n best_path_only (bool, optional): Do we only want to show the best path.\n Defaults to False.\n verbose (bool, optional): Do we want to show the complete edges instead of the\n simplified. 
Defaults to False.\n \"\"\"\n from_cluster = f\"Cluster \\n{from_cluster_id}\"\n to_cluster = f\"Cluster \\n{to_cluster_id}\"\n\n if from_cluster not in self.graph.nodes():\n logging.warning(f\"{from_cluster} is not a valid cluster.\")\n return\n\n if to_cluster not in self.graph.nodes():\n logging.warning(f\"{to_cluster} is not a valid cluster.\")\n return\n\n if verbose:\n _ = plt.figure(figsize=(30, 15))\n else:\n _ = plt.figure(figsize=(20, 10))\n plt.title(f\"SAMDP Paths from {from_cluster} to {to_cluster}\")\n\n logging.info(f\"Finding paths from {from_cluster} to {to_cluster}...\")\n\n if verbose:\n graph = copy.deepcopy(self.graph)\n else:\n graph = self._generate_simplified_graph()\n\n out_edges = graph.out_edges(from_cluster)\n graph.remove_edges_from(list(out_edges))\n\n action_out_edges = self.graph.out_edges(from_cluster, data=True, keys=True)\n\n graph.add_edges_from(list(action_out_edges))\n\n paths = list(nx.all_simple_edge_paths(graph, from_cluster, to_cluster))\n\n if len(paths) == 0:\n logging.info(f\"\\tNo paths found from {from_cluster} to {to_cluster}.\")\n plt.close()\n return\n\n updated_paths = []\n full_edge_list = []\n edge_list = []\n for path in paths:\n data_path = []\n for edge in path:\n edge_data = graph.get_edge_data(edge[0], edge[1], edge[2])\n updated_edge = (edge[0], edge[1], edge[2], edge_data)\n data_path.append(updated_edge)\n\n if updated_edge not in full_edge_list:\n full_edge_list.append(updated_edge)\n edge_list.append(edge)\n\n updated_paths.append(data_path)\n\n best_path = self._find_best_path(from_cluster, to_cluster, updated_paths)\n\n if best_path_only:\n full_edge_list = best_path\n edge_list = []\n for edge in best_path:\n edge_list.append((edge[0], edge[1], edge[2]))\n\n subgraph = nx.edge_subgraph(graph, edge_list)\n\n pos = self._generate_bfs_pos()\n edge_arcs = self._generate_edge_arcs(pos, edge_list)\n\n colors = [node[1][\"color\"] for node in subgraph.nodes(data=True)]\n node_edges = [node[1][\"edge_color\"] for node in subgraph.nodes(data=True)]\n\n nx.draw_networkx_nodes(\n subgraph,\n pos,\n node_size=3500,\n node_color=colors,\n edgecolors=node_edges,\n linewidths=5,\n )\n\n nx.draw_networkx_labels(subgraph, pos, font_color=\"whitesmoke\")\n\n for i, edge in enumerate(full_edge_list):\n nx.draw_networkx_edges(\n subgraph,\n pos,\n edgelist=[edge],\n connectionstyle=f\"arc3,rad={edge_arcs[i]}\",\n edge_color=edge[3][\"color\"],\n alpha=max(0, min(edge[3][\"weight\"] + 0.1, 1)),\n node_size=3500,\n arrowsize=25,\n )\n\n handles = [Patch(color=COLORS[i]) for i in range(self.num_actions)]\n labels = [f\"Action {i}\" for i in range(self.num_actions)]\n leg_title = \"Actions\"\n legend = {\"handles\": handles, \"labels\": labels, \"title\": leg_title}\n legend.update(\n {\"bbox_to_anchor\": (1.0, 1.0), \"loc\": \"upper left\", \"fontsize\": \"xx-large\"}\n )\n plt.legend(**legend)\n\n plt.tight_layout()\n logging.info(\n f\"Saving SAMDP path from {from_cluster} to {to_cluster} png to {file_path}...\"\n )\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n plt.savefig(file_path, format=\"PNG\")\n plt.close()\n\n def save_terminal_paths(\n self,\n file_path: str,\n best_path: bool = False,\n term_cluster_id: Optional[int] = None,\n ):\n \"\"\"Save all paths into all terminal nodes.\n\n Args:\n file_path (str): Path to save image to\n best_path (bool, optional): Do we only want to show the best paths between\n nodes. 
Defaults to False.\n term_cluster_id (Optional[int], optional): Cluster ID that we want to limit\n paths to instead of all paths. Defaults to None.\n \"\"\"\n graph = copy.deepcopy(self.graph)\n\n term_nodes = []\n for node in graph.nodes(data=True):\n if node[1][\"edge_color\"] == \"r\":\n term_nodes.append(node[0])\n\n if term_cluster_id is not None:\n cluster_node = f\"Cluster \\n{term_cluster_id}\"\n\n if cluster_node not in term_nodes:\n logging.info(f\"Cluster {term_cluster_id} is not a terminal cluster.\")\n return\n\n term_nodes = [cluster_node]\n\n _ = plt.figure(figsize=(20, 10))\n plt.title(f\"All SAMDP connections to terminal cluster {term_cluster_id}\")\n logging.info(f\"Finding connections to terminal cluster {term_cluster_id}...\")\n\n edge_list = []\n full_edge_list = []\n for node in term_nodes:\n full_in_edges = graph.in_edges(node, data=True, keys=True)\n\n if not best_path:\n for edge in full_in_edges:\n full_edge_list.append(edge)\n edge_list.append((edge[0], edge[1], edge[2]))\n else:\n node_dict = {}\n for edge in full_in_edges:\n if edge[0] not in node_dict.keys():\n node_dict[edge[0]] = edge\n else:\n if node_dict[edge[0]][3][\"weight\"] < edge[3][\"weight\"]:\n node_dict[edge[0]] = edge\n\n for edge in node_dict.values():\n full_edge_list.append(edge)\n edge_list.append((edge[0], edge[1], edge[2]))\n\n subgraph = self.graph.edge_subgraph(edge_list)\n\n pos = self._generate_bfs_pos()\n edge_arcs = self._generate_edge_arcs(pos, edge_list)\n\n colors = [node[1][\"color\"] for node in subgraph.nodes(data=True)]\n node_edges = [node[1][\"edge_color\"] for node in subgraph.nodes(data=True)]\n\n nx.draw_networkx_nodes(\n subgraph,\n pos,\n node_size=3500,\n node_color=colors,\n edgecolors=node_edges,\n linewidths=5,\n )\n\n nx.draw_networkx_labels(subgraph, pos, font_color=\"whitesmoke\")\n\n for i, edge in enumerate(full_edge_list):\n nx.draw_networkx_edges(\n subgraph,\n pos,\n edgelist=[edge],\n connectionstyle=f\"arc3,rad={edge_arcs[i]}\",\n edge_color=edge[3][\"color\"],\n node_size=3500,\n arrowsize=25,\n )\n\n handles = [Patch(color=COLORS[i]) for i in range(self.num_actions)]\n labels = [f\"Action {i}\" for i in range(self.num_actions)]\n leg_title = \"Actions\"\n legend = {\"handles\": handles, \"labels\": labels, \"title\": leg_title}\n legend.update(\n {\"bbox_to_anchor\": (1.0, 1.0), \"loc\": \"upper left\", \"fontsize\": \"xx-large\"}\n )\n plt.legend(**legend)\n\n plt.tight_layout()\n logging.info(f\"Saving all SAMDP paths to terminal clusters png to {file_path}...\")\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n plt.savefig(file_path, format=\"PNG\")\n plt.close()\n\n def save_all_paths_to(\n self, to_cluster_id: int, file_path: str, verbose: bool = False\n ):\n \"\"\"Save all possible paths from an initial node to given node.\n\n Args:\n to_cluster_id (int): Cluster we want to get to\n file_path (str): Path to save image to\n verbose (bool, optional): Do we want to show complete graph edges instead of\n simplified. 
Defaults to False.\n \"\"\"\n to_cluster = f\"Cluster \\n{to_cluster_id}\"\n\n if to_cluster not in self.graph.nodes():\n logging.warning(f\"{to_cluster} is not a valid cluster.\")\n return\n\n if verbose:\n _ = plt.figure(figsize=(30, 15))\n else:\n _ = plt.figure(figsize=(20, 10))\n plt.title(f\"All SAMDP Paths to {to_cluster}\")\n\n logging.info(f\"Finding paths to {to_cluster}...\")\n\n if verbose:\n graph = copy.deepcopy(self.graph)\n else:\n graph = self._generate_simplified_graph()\n\n in_edges = graph.in_edges(to_cluster)\n graph.remove_edges_from(list(in_edges))\n\n action_in_edges = self.graph.in_edges(to_cluster, data=True, keys=True)\n\n graph.add_edges_from(list(action_in_edges))\n\n paths = []\n\n for node in graph.nodes():\n if node == to_cluster:\n continue\n\n paths += list(nx.all_simple_edge_paths(graph, node, to_cluster))\n\n if len(paths) == 0:\n logging.info(f\"\\tNo paths found to {to_cluster}.\")\n plt.close()\n return\n\n updated_paths = []\n full_edge_list = []\n edge_list = []\n for path in paths:\n data_path = []\n for edge in path:\n edge_data = graph.get_edge_data(edge[0], edge[1], edge[2])\n updated_edge = (edge[0], edge[1], edge[2], edge_data)\n data_path.append(updated_edge)\n\n if updated_edge not in full_edge_list:\n full_edge_list.append(updated_edge)\n edge_list.append(edge)\n\n updated_paths.append(data_path)\n\n subgraph = nx.edge_subgraph(graph, edge_list)\n\n pos = self._generate_bfs_pos()\n edge_arcs = self._generate_edge_arcs(pos, edge_list)\n\n colors = [node[1][\"color\"] for node in subgraph.nodes(data=True)]\n node_edges = [node[1][\"edge_color\"] for node in subgraph.nodes(data=True)]\n\n nx.draw_networkx_nodes(\n subgraph,\n pos,\n node_size=3500,\n node_color=colors,\n edgecolors=node_edges,\n linewidths=5,\n )\n\n nx.draw_networkx_labels(subgraph, pos, font_color=\"whitesmoke\")\n\n for i, edge in enumerate(full_edge_list):\n nx.draw_networkx_edges(\n subgraph,\n pos,\n edgelist=[edge],\n connectionstyle=f\"arc3,rad={edge_arcs[i]}\",\n edge_color=edge[3][\"color\"],\n alpha=max(0, min(edge[3][\"weight\"] + 0.1, 1)),\n node_size=3500,\n arrowsize=25,\n )\n\n handles = [Patch(color=COLORS[i]) for i in range(self.num_actions)]\n labels = [f\"Action {i}\" for i in range(self.num_actions)]\n leg_title = \"Actions\"\n legend = {\"handles\": handles, \"labels\": labels, \"title\": leg_title}\n legend.update(\n {\"bbox_to_anchor\": (1.0, 1.0), \"loc\": \"upper left\", \"fontsize\": \"xx-large\"}\n )\n plt.legend(**legend)\n\n plt.tight_layout()\n logging.info(f\"Saving all SAMDP paths to {to_cluster} png to {file_path}...\")\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n plt.savefig(file_path, format=\"PNG\")\n plt.close()" } ]
import os import networkx as nx import numpy as np import pytest from arlin.analysis.visualization import COLORS from arlin.samdp import SAMDP
8,981
@pytest.fixture def samdp(random_dataset, random_clusters): samdp = SAMDP(random_clusters[0], random_dataset) return samdp class TestSAMDP: def test_init(self, random_clusters, random_dataset): samdp = SAMDP(random_clusters[0], random_dataset) assert np.array_equal(samdp.clusters, random_clusters[0]) assert samdp.dataset == random_dataset def test_generate(self, samdp): samdp_obj = samdp._generate() num_clusters = max(samdp.clusters) + 1 num_actions = samdp.dataset.env.action_space.n assert samdp_obj.shape == (num_clusters, num_actions + 1, num_clusters) for cluster_id in range(num_clusters): for action in range(num_actions): if sum(samdp_obj[cluster_id][action]) != 0: assert sum(samdp_obj[cluster_id][action]) == 1 assert np.sum(np.isnan(samdp_obj)) == 0 for i in range(samdp.dataset.num_datapoints): if samdp.dataset.terminateds[i] or samdp.dataset.truncateds[i]: continue action = samdp.dataset.actions[i] from_cluster = samdp.clusters[i] to_cluster = samdp.clusters[i + 1] if from_cluster != to_cluster: action_prob = samdp.samdp_counts[from_cluster][action][to_cluster] / sum( samdp.samdp_counts[from_cluster][action][:] ) assert samdp_obj[from_cluster][action][to_cluster] == action_prob total_prob = samdp.samdp_counts[from_cluster][-1][to_cluster] / sum( samdp.samdp_counts[from_cluster][-1][:] ) assert samdp_obj[from_cluster][-1][to_cluster] == total_prob def test_save_txt(self, samdp, tmpdir): samdp.save_txt(tmpdir) assert os.path.isfile(os.path.join(tmpdir, "samdp.txt")) def test_generate_graph(self, samdp): graph = samdp._generate_graph() num_clusters = max(samdp.clusters) + 1 assert graph.number_of_nodes() == num_clusters start_clusters = set(samdp.clusters[samdp.dataset.start_indices]) term_clusters = set(samdp.clusters[samdp.dataset.term_indices]) nodes = graph.nodes(data=True) for i in range(num_clusters): if i in start_clusters: edge_color = "g" elif i in term_clusters: edge_color = "r" else: edge_color = "k"
@pytest.fixture def samdp(random_dataset, random_clusters): samdp = SAMDP(random_clusters[0], random_dataset) return samdp class TestSAMDP: def test_init(self, random_clusters, random_dataset): samdp = SAMDP(random_clusters[0], random_dataset) assert np.array_equal(samdp.clusters, random_clusters[0]) assert samdp.dataset == random_dataset def test_generate(self, samdp): samdp_obj = samdp._generate() num_clusters = max(samdp.clusters) + 1 num_actions = samdp.dataset.env.action_space.n assert samdp_obj.shape == (num_clusters, num_actions + 1, num_clusters) for cluster_id in range(num_clusters): for action in range(num_actions): if sum(samdp_obj[cluster_id][action]) != 0: assert sum(samdp_obj[cluster_id][action]) == 1 assert np.sum(np.isnan(samdp_obj)) == 0 for i in range(samdp.dataset.num_datapoints): if samdp.dataset.terminateds[i] or samdp.dataset.truncateds[i]: continue action = samdp.dataset.actions[i] from_cluster = samdp.clusters[i] to_cluster = samdp.clusters[i + 1] if from_cluster != to_cluster: action_prob = samdp.samdp_counts[from_cluster][action][to_cluster] / sum( samdp.samdp_counts[from_cluster][action][:] ) assert samdp_obj[from_cluster][action][to_cluster] == action_prob total_prob = samdp.samdp_counts[from_cluster][-1][to_cluster] / sum( samdp.samdp_counts[from_cluster][-1][:] ) assert samdp_obj[from_cluster][-1][to_cluster] == total_prob def test_save_txt(self, samdp, tmpdir): samdp.save_txt(tmpdir) assert os.path.isfile(os.path.join(tmpdir, "samdp.txt")) def test_generate_graph(self, samdp): graph = samdp._generate_graph() num_clusters = max(samdp.clusters) + 1 assert graph.number_of_nodes() == num_clusters start_clusters = set(samdp.clusters[samdp.dataset.start_indices]) term_clusters = set(samdp.clusters[samdp.dataset.term_indices]) nodes = graph.nodes(data=True) for i in range(num_clusters): if i in start_clusters: edge_color = "g" elif i in term_clusters: edge_color = "r" else: edge_color = "k"
node = (f"Cluster \n{i}", {"edge_color": edge_color, "color": COLORS[i]})
0
2023-11-08 13:57:45+00:00
12k
Giftify-Bot/Giftify-Bot
models/giveaways.py
[ { "identifier": "ChannelConfig", "path": "models/giveaway_settings.py", "snippet": "class ChannelConfig:\n \"\"\"Represents the configuration settings for a channel.\n\n Attributes\n ----------\n channel: Union[discord.TextChannel, discord.CategoryChannel]\n The channel associated with the config.\n guild: discord.Guild\n The guild to which the channel belongs.\n required_roles: List[discord.Role]\n The list of default required roles.\n blacklisted_roles: List[discord.Role]\n The list of default blacklisted roles.\n bypass_roles: List[discord.Role]\n The list of default bypass_roles.\n multiplier_roles: Dict[discord.Role, int]\n The role and number of multiplier_roles entries mapping.\n ping: Optional[discord.Role]\n The default ping role for some channel.\n \"\"\"\n\n __slots__: Tuple[str, ...] = (\n \"channel\",\n \"guild\",\n \"required_roles\",\n \"blacklisted_roles\",\n \"bypass_roles\",\n \"multiplier_roles\",\n \"ping\",\n )\n\n def __init__(\n self,\n channel: Union[discord.TextChannel, discord.CategoryChannel],\n guild: discord.Guild,\n *,\n required_roles: List[discord.Role],\n blacklisted_roles: List[discord.Role],\n bypass_roles: List[discord.Role],\n multiplier_roles: Dict[discord.Role, int],\n ping: Optional[discord.Role] = None,\n ):\n self.channel = channel\n self.guild = guild\n self.required_roles = required_roles\n self.blacklisted_roles = blacklisted_roles\n self.bypass_roles = bypass_roles\n self.multiplier_roles = multiplier_roles\n self.ping = ping\n\n def __repr__(self):\n return f\"<ChannelConfig channel={self.channel!r}>\"\n\n @classmethod\n def from_data(\n cls,\n guild: discord.Guild,\n data: asyncpg.Record,\n ) -> Optional[\"ChannelConfig\"]:\n \"\"\"Create a ChannelConfig object from given data.\n\n Parameters\n ----------\n guild: discord.Guild\n The guild to which the channel belongs.\n value: Any\n The new value for the column.\n\n Returns\n -------\n ChannelConfig\n The updated `ChannelConfig` instance.\n \"\"\"\n\n data = dict(data)\n\n # We do not need these\n channel_id = data.pop(\"channel\")\n channel = guild.get_channel(channel_id)\n if channel is None:\n return\n\n assert isinstance(channel, (discord.TextChannel, discord.CategoryChannel))\n\n data[\"ping\"] = guild.get_role(data[\"ping\"])\n data[\"required_roles\"] = [\n guild.get_role(role) for role in data[\"required_roles\"] if role is not None\n ]\n data[\"blacklisted_roles\"] = [\n guild.get_role(role)\n for role in data[\"blacklisted_roles\"]\n if role is not None\n ]\n data[\"bypass_roles\"] = [\n guild.get_role(role) for role in data[\"bypass_roles\"] if role is not None\n ]\n data[\"multiplier_roles\"] = {\n guild.get_role(role): multiplier_roles\n for role, multiplier_roles in data[\"multiplier_roles\"].items()\n if role is not None\n }\n\n data.pop(\"guild\")\n\n return cls(channel, guild, **data)\n\n async def update(\n self, column: str, value: Any, pool: asyncpg.Pool\n ) -> \"ChannelConfig\":\n \"\"\"Update the specified column with the provided value in the database.\n\n Parameters\n ----------\n column: str\n The column to be updated.\n value: Any\n The new value for the column.\n pool: asyncpg.Pool\n The database connection pool.\n\n Raises\n ------\n ValueError\n If the provided column is not a valid column name in `self.__slots__`.\n\n Returns\n -------\n ChannelConfig\n The updated `ChannelConfig` instance.\n \"\"\"\n if column not in self.__slots__:\n raise ValueError(f\"Invalid column: {column}\")\n\n setattr(self, column, value)\n\n if isinstance(value, list):\n value = 
[role.id for role in value if role is not None]\n elif isinstance(value, dict):\n value = {\n role.id: multiplier_roles\n for role, multiplier_roles in value.items()\n if role is not None\n }\n elif isinstance(value, discord.Role):\n value = value.id\n else:\n raise ValueError(\"Unknown type given.\")\n\n query = f\"\"\"INSERT INTO channel_configs (guild, channel, {column}) VALUES ($1, $2, $3)\n ON CONFLICT (guild, channel) DO\n UPDATE SET {column} = excluded.{column}\"\"\"\n\n await pool.execute(\n query,\n self.guild.id,\n self.channel.id,\n value,\n )\n\n return self\n\n @classmethod\n async def create(\n cls,\n guild: discord.Guild,\n channel: Union[discord.TextChannel, discord.CategoryChannel],\n pool: asyncpg.Pool,\n ) -> \"ChannelConfig\":\n query = \"\"\"INSERT INTO channel_configs (guild, channel) VALUES ($1, $2) RETURNING *\"\"\"\n\n record = await pool.fetchrow(\n query,\n guild.id,\n channel.id,\n )\n\n instance = cls.from_data(guild, record)\n assert instance is not None # Since we just created it.\n return instance\n\n @staticmethod\n async def delete(channel_id: int, guild_id: int, pool: asyncpg.Pool):\n \"\"\"Delete the current ChannelConfig object.\n\n Parameters\n ----------\n channel_id: int\n The ID of the channel.\n guild_id: int\n The ID of the guild.\n pool: asyncpg.Pool\n The database connection pool.\n \"\"\"\n\n query = \"\"\"DELETE FROM channel_configs\n WHERE guild = $ AND channel = $2\"\"\"\n\n await pool.execute(query, guild_id, channel_id)" }, { "identifier": "GuildConfig", "path": "models/giveaway_settings.py", "snippet": "class GuildConfig:\n \"\"\"Represents the configuration settings for a guild.\n\n Parameters\n ----------\n guild: discord.Guild\n The guild associated with the configuration.\n logging: Optional[discord.TextChannel]\n The logging text channel for the guild.\n ping: Optional[discord.Role]\n The role to ping for notifications.\n reaction: str\n The reaction used for giveaways.\n participants_reaction,: str\n The reaction used for giveaways participants button.\n required_roles: List[discord.Role]\n The default roles required to join giveaway.\n blacklisted_roles: List[discord.Role]\n The default roles blacklisted from joining a giveaway.\n bypass_roles: List[discord.Role]\n The roles that bypass_roles certain restrictions.\n multiplier_roles: Dict[discord.Role, int]\n The multiplier_roles points assigned to each role.\n managers: List[discord.Role]\n The roles with manager permissions.\n dm_winner: bool\n Whether to send a direct message to the winner.\n dm_host: bool\n Whether to send a direct message to the host.\n channel_settings: List[ChannelConfig]\n The settings for each channel.\n color: discord.Colour\n The color used for messages.\n button_style: discord.ButtonStyle\n The style of the button.\n end_message: str\n The message sent when a giveaway ends.\n reroll_message: str\n The message sent when a giveaway rerolls.\n dm_message: str\n The direct message sent to winner.\n dm_host_message: str\n The direct message sent to host.\n gw_header: str\n The header for the giveaway message.\n gw_end_header: str\n The header for the giveaway end.\n \"\"\"\n\n __slots__: Tuple[str, ...] 
= (\n \"guild\",\n \"logging\",\n \"ping\",\n \"reaction\",\n \"participants_reaction\",\n \"required_roles\",\n \"blacklisted_roles\",\n \"bypass_roles\",\n \"multiplier_roles\",\n \"managers\",\n \"dm_winner\",\n \"dm_host\",\n \"channel_settings\",\n \"color\",\n \"button_style\",\n \"end_message\",\n \"reroll_message\",\n \"dm_message\",\n \"dm_host_message\",\n \"gw_header\",\n \"gw_end_header\",\n )\n\n def __init__(\n self,\n guild: discord.Guild,\n *,\n logging: Optional[discord.TextChannel],\n ping: Optional[discord.Role],\n reaction: str,\n participants_reaction: str,\n required_roles: List[discord.Role],\n blacklisted_roles: List[discord.Role],\n bypass_roles: List[discord.Role],\n multiplier_roles: Dict[discord.Role, int],\n managers: List[discord.Role],\n dm_winner: bool,\n dm_host: bool,\n channel_settings: List[ChannelConfig],\n color: discord.Colour,\n button_style: discord.ButtonStyle,\n end_message: str,\n reroll_message: str,\n dm_message: str,\n dm_host_message: str,\n gw_header: str,\n gw_end_header: str,\n ):\n self.guild = guild\n self.logging = logging\n self.ping = ping\n self.reaction = reaction\n self.participants_reaction = participants_reaction\n self.required_roles = required_roles\n self.blacklisted_roles = blacklisted_roles\n self.bypass_roles = bypass_roles\n self.multiplier_roles = multiplier_roles\n self.managers = managers\n self.dm_winner = dm_winner\n self.dm_host = dm_host\n self.channel_settings = channel_settings\n self.color = color\n self.button_style = button_style\n self.end_message = end_message\n self.reroll_message = reroll_message\n self.dm_host_message = dm_host_message\n self.dm_message = dm_message\n self.gw_header = gw_header\n self.gw_end_header = gw_end_header\n\n def __repr__(self):\n return f\"<GuildConfig guild={self.guild!r}>\"\n\n @staticmethod\n async def _create_config(guild_id: int, pool: asyncpg.Pool) -> asyncpg.Record:\n return await pool.fetchrow(\n \"INSERT INTO configs (guild) VALUES ($1) RETURNING *\",\n guild_id,\n )\n\n @classmethod\n def _from_data(\n cls,\n guild: discord.Guild,\n data: asyncpg.Record,\n channel_data: List[asyncpg.Record],\n ) -> \"GuildConfig\":\n data = dict(data)\n data[\"color\"] = discord.Colour(data[\"color\"])\n\n data[\"logging\"] = guild.get_channel(data[\"logging\"])\n data[\"ping\"] = guild.get_role(data[\"ping\"])\n data[\"required_roles\"] = [\n guild.get_role(role) for role in data[\"required_roles\"] if role is not None\n ]\n data[\"blacklisted_roles\"] = [\n guild.get_role(role)\n for role in data[\"blacklisted_roles\"]\n if role is not None\n ]\n data[\"bypass_roles\"] = [\n guild.get_role(role) for role in data[\"bypass_roles\"] if role is None\n ]\n data[\"multiplier_roles\"] = {\n guild.get_role(role): multiplier\n for role, multiplier in data[\"multiplier_roles\"].items()\n if role is not None and multiplier > 1\n }\n data[\"managers\"] = [\n guild.get_role(role) for role in data[\"managers\"] if role is not None\n ]\n\n data[\"button_style\"] = discord.utils.get(\n discord.ButtonStyle, value=data[\"button_style\"]\n )\n\n data[\"channel_settings\"] = [\n channel_setting\n for record in channel_data\n if (channel_setting := ChannelConfig.from_data(guild, record))\n ]\n\n data.pop(\"guild\") # We do not need this.\n\n return cls(guild, **data)\n\n def to_dict(self) -> GuildConfigData:\n \"\"\"Converts this GuildConfig object into a dict.\"\"\"\n\n data = GuildConfigData(\n guild=self.guild.id,\n reaction=self.reaction,\n participants_reaction=self.participants_reaction,\n 
required_roles=[\n role.id for role in self.required_roles if role is not None\n ],\n blacklisted_roles=[\n role.id for role in self.blacklisted_roles if role is not None\n ],\n bypass_roles=[role.id for role in self.bypass_roles if role is not None],\n multiplier_roles={\n role.id: multiplier_roles\n for role, multiplier_roles in self.multiplier_roles.items()\n if role is not None\n },\n managers=[role.id for role in self.managers if role is not None],\n dm_winner=self.dm_winner,\n dm_host=self.dm_host,\n color=int(self.color),\n button_style=self.button_style.value,\n end_message=self.end_message,\n reroll_message=self.reroll_message,\n dm_message=self.dm_message,\n dm_host_message=self.dm_host_message,\n gw_header=self.gw_header,\n gw_end_header=self.gw_end_header,\n ) # type: ignore\n if self.logging:\n data[\"logging\"] = self.logging.id\n if self.ping:\n data[\"ping\"] = self.ping.id\n return data\n\n @classmethod\n async def fetch(cls, guild: discord.Guild, pool: asyncpg.Pool) -> \"GuildConfig\":\n \"\"\"Create a GuildConfig instance from data retrieved from a database.\n\n Parameters\n ----------\n guild: discord.Guild\n The discord guild.\n pool: asyncpg.Pool\n The database connection pool.\n\n Returns\n -------\n GuildConfig\n An instance of GuildConfig populated with the retrieved data.\n \"\"\"\n\n data = await pool.fetchrow(\"SELECT * FROM configs WHERE guild = $1\", guild.id)\n channel_data: List[asyncpg.Record] = await pool.fetch(\n \"SELECT * FROM channel_configs WHERE guild = $1\", guild.id\n )\n\n if not data:\n data: asyncpg.Record = await cls._create_config(guild.id, pool)\n\n return cls._from_data(guild, data, channel_data)\n\n async def update(\n self, column: str, value: Any, pool: asyncpg.Pool\n ) -> \"GuildConfig\":\n \"\"\"Update the specified column with the provided value in the database.\n\n Parameters\n ----------\n column: str\n The column to be updated.\n value: Any\n The new value for the column.\n pool: asyncpg.Pool\n The database connection pool.\n\n Raises\n ------\n ValueError\n If the provided column is not a valid column name in `self.__slots__`.\n\n Returns\n -------\n GuildConfig\n The updated `GuildConfig` instance.\n \"\"\"\n if column not in self.__slots__:\n raise ValueError(f\"Invalid column: {column}\")\n\n setattr(self, column, value)\n\n data = self.to_dict()\n\n columns = \", \".join(data.keys())\n placeholders = \", \".join([f\"${i+1}\" for i in range(len(data))])\n update_clause = \", \".join(\n [f\"{key} = EXCLUDED.{key}\" for key in data.keys() if key != \"guild\"]\n )\n\n query = f\"\"\"\n INSERT INTO configs ({columns}) \n VALUES ({placeholders})\n ON CONFLICT (guild) DO \n UPDATE SET {update_clause}\n \"\"\"\n\n values = list(data.values())\n await pool.execute(query, *values)\n return self\n\n @overload\n async def get_channel_config(\n self,\n channel: Union[discord.TextChannel, discord.CategoryChannel],\n create_if_not_exists: bool = True,\n pool: Optional[asyncpg.Pool] = None,\n ) -> ChannelConfig:\n ...\n\n @overload\n async def get_channel_config(\n self,\n channel: Union[discord.TextChannel, discord.CategoryChannel],\n create_if_not_exists: bool = False,\n pool: Optional[asyncpg.Pool] = None,\n ) -> Optional[ChannelConfig]:\n ...\n\n async def get_channel_config(\n self,\n channel: Union[discord.TextChannel, discord.CategoryChannel],\n create_if_not_exists: bool = True,\n pool: Optional[asyncpg.Pool] = None,\n ) -> Optional[ChannelConfig]:\n \"\"\"\n Retrieves the configuration for a specific channel.\n\n Parameters\n 
----------\n channel: Union[discord.TextChannel, discord.CategoryChannel]\n The channel for which to retrieve the configuration.\n create_if_not_exists: Optional[bool]\n Whether to create a new configuration if it doesn't exist. Default is True.\n pool: Optional[asyncpg.Pool]\n The connection pool for interacting with the database.\n\n Returns\n -------\n Optional[ChannelConfig]\n The ChannelConfig object if it exists, or None if it doesn't exist and create_if_not_exists is set to False.\n\n Raises\n ------\n MaxChannelConfigCreationError\n If create_if_not_exists is True and the maximum number of channel configurations has already been reached.\n \"\"\"\n\n config = discord.utils.get(self.channel_settings, channel=channel)\n if config is not None:\n return config\n\n if create_if_not_exists:\n if len(self.channel_settings) >= 25:\n raise MaxChannelConfigCreationError()\n else:\n if pool:\n config = await ChannelConfig.create(channel.guild, channel, pool)\n self.channel_settings.append(config)\n return config\n\n return None" }, { "identifier": "GIFT_EMOJI", "path": "utils/constants.py", "snippet": "GIFT_EMOJI = \"<:GiftifyGift:1119664021914796125> \"" }, { "identifier": "GiveawayError", "path": "utils/exceptions.py", "snippet": "class GiveawayError(Exception):\r\n \"\"\"Error raised in a giveaway.\"\"\"\r" }, { "identifier": "bold", "path": "utils/functions.py", "snippet": "def bold(message: str) -> str:\n return f\"**{message}**\"" }, { "identifier": "safe_format", "path": "utils/functions.py", "snippet": "def safe_format(message: str, **kwargs) -> str:\n \"\"\"A poorly written format function.\"\"\"\n for key, value in kwargs.items():\n formatted_key = \"{\" + key + \"}\"\n message = message.replace(formatted_key, str(value))\n return message" }, { "identifier": "Interaction", "path": "utils/tree.py", "snippet": "class CommandTree(app_commands.CommandTree):\r\n async def on_error(\r\n self,\r\n interaction: Interaction,\r\n error: app_commands.AppCommandError,\r\n ) -> None:\r" }, { "identifier": "BaseView", "path": "utils/view.py", "snippet": "class BaseView(discord.ui.View):\r\n children: List[Union[discord.ui.Button, discord.ui.Select]]\r\n message: Optional[Union[discord.Message, discord.InteractionMessage]] = None\r\n author: Optional[Union[discord.Member, discord.User]] = None\r\n\r\n async def on_error(\r\n self, interaction: Interaction, error: Exception, item: discord.ui.Item\r\n ) -> None:\r\n if isinstance(error, GiveawayError):\r\n embed = discord.Embed(\r\n title=\"An error was raised while executing this command!\",\r\n description=f\"{WARN_EMOJI} {str(error)}\",\r\n color=discord.Colour.red(),\r\n )\r\n view = discord.ui.View()\r\n button = discord.ui.Button(\r\n label=\"Support\", url=\"https://discord.gg/GQSGChbEKz\"\r\n )\r\n view.add_item(button)\r\n\r\n await interaction.followup.send(embed=embed, view=view, ephemeral=True)\r\n elif isinstance(error, ButtonOnCooldown):\r\n embed = discord.Embed(\r\n title=\"Stop clicking the button too fast!\",\r\n description=f\"{WARN_EMOJI} You are clicking the button too fast. 
Please retry after {error.retry_after: .2f}s.\",\r\n color=discord.Colour.red(),\r\n )\r\n view = discord.ui.View()\r\n button = discord.ui.Button(\r\n label=\"Support\", url=\"https://discord.gg/GQSGChbEKz\"\r\n )\r\n view.add_item(button)\r\n\r\n await interaction.followup.send(embed=embed, view=view, ephemeral=True)\r\n else:\r\n if not isinstance(\r\n error, (discord.HTTPException, discord.errors.InteractionResponded)\r\n ):\r\n if not interaction.response.is_done():\r\n await interaction.response.defer(thinking=True, ephemeral=True)\r\n\r\n embed = discord.Embed(\r\n title=\"An error was raised while executing this command!\",\r\n description=f\"{WARN_EMOJI} An unknown error occurred, my developers have been notified about this error.\",\r\n color=discord.Colour.red(),\r\n )\r\n view = discord.ui.View()\r\n button = discord.ui.Button(\r\n label=\"Support\", url=\"https://discord.gg/GQSGChbEKz\"\r\n )\r\n view.add_item(button)\r\n\r\n await interaction.followup.send(embed=embed, view=view, ephemeral=True)\r\n sentry_sdk.capture_exception(error)\r\n return interaction.client.log_handler.log.exception(\r\n \"Exception occurred in the View:\\n\", exc_info=error\r\n )\r\n\r\n async def on_timeout(self) -> None:\r\n for item in self.children:\r\n if isinstance(item, (discord.ui.Button, discord.ui.Select)):\r\n item.disabled = True\r\n\r\n if self.message is not None:\r\n try:\r\n await self.message.edit(view=self)\r\n except Exception:\r\n pass\r" }, { "identifier": "GiveawayView", "path": "utils/view.py", "snippet": "class GiveawayView(BaseView):\r\n def __init__(\r\n self,\r\n reaction: str = GIVEAWAY_EMOJI,\r\n participants_reaction: str = PARTICIPANTS_EMOJI,\r\n button_style: discord.ButtonStyle = discord.ButtonStyle.blurple,\r\n *,\r\n participant_count: Optional[int] = None,\r\n disabled: bool = False,\r\n ):\r\n super().__init__(timeout=None)\r\n\r\n self.add_item(\r\n GiveawayButton(\r\n reaction,\r\n button_style,\r\n participant_count=participant_count,\r\n disabled=disabled,\r\n )\r\n )\r\n self.add_item(ParticipantsButton(reaction=participants_reaction))\r\n\r\n def key(interaction: Interaction):\r\n return interaction.user.id\r\n\r\n self.cooldown = commands.CooldownMapping.from_cooldown(3, 5, key)\r\n\r\n async def interaction_check(self, interaction: Interaction):\r\n if retry_after := self.cooldown.update_rate_limit(interaction):\r\n raise ButtonOnCooldown(retry_after)\r\n\r\n return await super().interaction_check(interaction)\r" } ]
import contextlib import datetime import random import asyncpg import discord from enum import Enum from typing import TYPE_CHECKING, Dict, List, Optional from models.giveaway_settings import ChannelConfig, GuildConfig from utils.constants import GIFT_EMOJI from utils.exceptions import GiveawayError from utils.functions import bold, safe_format from utils.tree import Interaction from utils.view import BaseView, GiveawayView from bot import Giftify
7,767
else [], multiplier_roles={ role.id: entries for role, entries in multiplier_roles.items() if role is not None } if multiplier_roles else {}, messages={}, messages_required=messages_required, allowed_message_channels=[c.id for c in allowed_message_channels] if allowed_message_channels else [], extra_message_id=extra_message.id if extra_message else None, amari=amari, weekly_amari=weekly_amari, ) @classmethod async def create_entry( cls, bot: Giftify, guild_id: int, channel_id: int, message_id: int, prize: str, host_id: int, winner_count: int, ends: datetime.datetime, required_roles: List[int], blacklisted_roles: List[int], bypass_roles: List[int], donor_id: Optional[int], multiplier_roles: Optional[dict], messages: Optional[dict], messages_required: Optional[int], allowed_message_channels: Optional[List[int]], extra_message_id: Optional[int], amari: Optional[int], weekly_amari: Optional[int], ) -> "Giveaway": """ Create a new Giveaway object and insert it into the database. Parameters ---------- bot: Giftify The bot instance. guild_id: int The ID of the guild (server) where the giveaway is hosted. channel_id: int The ID of the channel where the giveaway is hosted. message_id: int The ID of the message having the giveaway view. prize: str The prize of the giveaway. host_id: int The ID of the user hosting the giveaway. donor_id: int The ID of the donor of the giveaway. winner_count: int The number of winners for the giveaway. ends: datetime.datetime The time when the giveaway ends. required_roles: List[int] The list of role IDs required to participate in the giveaway. blacklisted_roles: List[int] The list of role IDs excluded from participating in the giveaway. bypass_roles: List[int] The list of user IDs exempted from giveaway restrictions. multiplier_roles: Optional[dict] A dictionary containing multiplier_roles criteria for the giveaway. messages: Optional[dict] A dictionary containing message-based criteria for the giveaway. messages_required: Optional[int] The number of messages required to participate in the giveaway. allowed_message_channels: Optional[int] The ID of the channel where the message count is tracked. amari: Optional[int] The required Amari XP to participate in the giveaway. weekly_amari: Optional[int] The required weekly Amari XP to participate in the giveaway. Returns ------- Giveaway The created Giveaway object. """ record = await bot.pool.fetchrow( "INSERT INTO giveaways (guild, channel, message, extra_message, host, donor, prize, winner_count, ends, required_roles, blacklisted_roles, bypass_roles, multiplier_roles, messages, messages_required, messages_channel, amari, weekly_amari) " "VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18) " "RETURNING *", guild_id, channel_id, message_id, extra_message_id, host_id, donor_id, prize, winner_count, ends, required_roles, blacklisted_roles, bypass_roles, multiplier_roles, messages, messages_required, allowed_message_channels, amari, weekly_amari, ) return cls(bot=bot, record=record) async def check_requirements(self, member: discord.Member) -> None: missing_roles = [ role.mention for role_id in self.required_roles if (role := member.guild.get_role(role_id)) and role not in member.roles ] if missing_roles:
from __future__ import annotations if TYPE_CHECKING: class Giveaway: """ Represents a giveaway object. Attributes ---------- bot: Giftify The bot instance to handle the giveaway. guild_id: int The ID of the guild (server) where the giveaway is hosted. channel_id: int The ID of the channel where the giveaway is hosted. message_id: int The ID of the giveaway message. extra_message_id: int The ID of the extra message with giveaway. host_id: int The ID of the user hosting the giveaway. donor_id: int The ID of the user donating for the giveaway. prize: int The prize of the giveaway. winner_count: int The number of winners for the giveaway. winners: List[int] The winners of the giveaway. participants: List[int] The IDs participants for the giveaway. ended: bool Indicates whether the giveaway has ended. ends: datetime.datetime The timestamp when the giveaway will be ended. required_roles: List[int] The list of role IDs required to participate in the giveaway. blacklisted_roles: List[int] The list of role IDs excluded from participating in the giveaway. bypass_roles: List[int] The list of user IDs exempted from giveaway restrictions. multiplier_roles: Optional[dict] A dictionary containing multiplier_roles criteria for the giveaway. messages: Optional[dict] A dictionary containing message-based criteria for the giveaway. messages_required: Optional[int] The number of messages required to participate in the giveaway. allowed_message_channels: Optional[List[int]] The ID of the channels where the message count is tracked. amari: Optional[int] The required Amari XP to participate in the giveaway. weekly_amari: Optional[int] The required weekly Amari XP to participate in the giveaway. """ __slots__ = ( "bot", "guild_id", "channel_id", "message_id", "extra_message_id", "prize", "host_id", "donor_id", "winner_count", "winners", "participants", "ended", "ends", "required_roles", "blacklisted_roles", "bypass_roles", "multiplier_roles", "messages", "messages_required", "allowed_message_channels", "amari", "weekly_amari", ) def __init__(self, *, bot: Giftify, record: asyncpg.Record): self.bot = bot self.guild_id: int = record["guild"] self.channel_id: int = record["channel"] self.message_id: int = record["message"] self.extra_message_id: int = record["extra_message"] self.prize: str = record["prize"] self.host_id: int = record["host"] self.donor_id: Optional[int] = record["donor"] self.winner_count: int = record["winner_count"] self.winners: List[int] = record["winners"] self.participants: List[int] = record["participants"] self.ended: bool = record["ended"] self.ends: datetime.datetime = record["ends"] self.required_roles: List[int] = record["required_roles"] or [] self.blacklisted_roles: List[int] = record["blacklisted_roles"] or [] self.bypass_roles: List[int] = record["bypass_roles"] or [] self.multiplier_roles: Dict[int, int] = { int(role): entries for role, entries in record["multiplier_roles"].items() if entries > 1 } self.messages: Dict[int, int] = { int(member): messages for member, messages in record["messages"].items() } self.messages_required: Optional[int] = record["messages_required"] self.allowed_message_channels: Optional[List[int]] = record["messages_channel"] self.amari: Optional[int] = record["amari"] self.weekly_amari: Optional[int] = record["weekly_amari"] def __eq__(self, other: "Giveaway") -> bool: try: return ( self.guild_id == other.guild_id and self.channel_id == other.channel_id and self.message_id == other.message_id ) except AttributeError: return False def __hash__(self) -> int: 
return hash((self.guild_id, self.channel_id, self.message_id)) def __repr__(self) -> str: return f"<Giveaway guild_id={self.guild_id} channel_id={self.channel_id} message_id={self.message_id}>" @property def jump_to_giveaway(self) -> discord.ui.View: url = f"https://discord.com/channels/{self.guild_id}/{self.channel_id}/{self.message_id}" view = BaseView(timeout=None) button = discord.ui.Button(label="Jump To Giveaway", url=url) view.add_item(button) return view @staticmethod def create_embed( interaction: Interaction, config: GuildConfig, duration: datetime.datetime, winners: int, prize: str, required_roles: Optional[List[discord.Role]] = None, blacklisted_roles: Optional[List[discord.Role]] = None, bypass_roles: Optional[List[discord.Role]] = None, multiplier_roles: Optional[Dict[discord.Role, int]] = None, messages_required: Optional[int] = None, allowed_message_channels: Optional[List[discord.TextChannel]] = None, amari: Optional[int] = None, weekly_amari: Optional[int] = None, donor: Optional[discord.Member] = None, ) -> discord.Embed: assert interaction.guild is not None description = f"Click the {config.reaction} button to join the giveaway!\n" description += f"Hosted By: {interaction.user.mention}\n" if donor: description += f"Donor: {donor.mention}\n" description += f"Ends: {discord.utils.format_dt(duration, style='R')} ({discord.utils.format_dt(duration, style='f')})\n" embed = discord.Embed( title=prize, description=description, colour=config.color, timestamp=duration, ) embed.set_footer( text=f"{winners} winner(s) • Ends", icon_url=interaction.guild.icon or interaction.client.user.display_avatar, ) requirements = "" if required_roles: requirements += f"Required Roles: {', '.join(role.mention for role in required_roles if role is not None)}\n" if bypass_roles: requirements += f"Bypass Roles: {', '.join(role.mention for role in bypass_roles if role is not None)}\n" if blacklisted_roles: requirements += f"Blacklisted Roles: {', '.join(role.mention for role in blacklisted_roles if role is not None)}\n" if messages_required: requirements += ( f"Messages Required: **{messages_required}** message(s) (5s cooldown)\n" ) if allowed_message_channels: requirements += f"Allowed Channels: {', '.join(f'<#{c.id}>' for c in allowed_message_channels)}\n" if amari: requirements += f"Amari Level: {amari}\n" if weekly_amari: requirements += f"Weekly Amari: {weekly_amari} XP Points\n" if requirements: embed.add_field(name="Requirements", value=requirements, inline=False) if multiplier_roles: multiplier_roles_mention = "\n".join( [ f"- {entry}x ・ {role.mention}" for role, entry in multiplier_roles.items() if role is not None ] ) embed.add_field( name="Bonus Entries", value=multiplier_roles_mention, inline=False ) return embed @classmethod async def start( cls, interaction: Interaction, duration: datetime.datetime, winners: int, prize: str, config: GuildConfig, channel_config: Optional[ChannelConfig], required_roles: Optional[List[discord.Role]] = None, blacklisted_roles: Optional[List[discord.Role]] = None, bypass_roles: Optional[List[discord.Role]] = None, multiplier_roles: Optional[Dict[discord.Role, int]] = None, messages_required: Optional[int] = None, allowed_message_channels: Optional[List[discord.TextChannel]] = None, amari: Optional[int] = None, weekly_amari: Optional[int] = None, image: Optional[discord.Attachment] = None, donor: Optional[discord.Member] = None, ping: bool = False, message: Optional[str] = None, ): assert isinstance(interaction.channel, discord.TextChannel) assert 
interaction.guild is not None embed = cls.create_embed( interaction=interaction, config=config, duration=duration, winners=winners, prize=prize, required_roles=required_roles, blacklisted_roles=blacklisted_roles, bypass_roles=bypass_roles, multiplier_roles=multiplier_roles, messages_required=messages_required, allowed_message_channels=allowed_message_channels, amari=amari, weekly_amari=weekly_amari, donor=donor, ) view = GiveawayView( config.reaction, config.participants_reaction, config.button_style ) giveaway_message = await interaction.channel.send( config.gw_header, embed=embed, view=view ) message_embed = discord.Embed( title=f"{GIFT_EMOJI} Giveaway", description=f"**Message・** {message}" if message else None, color=config.color, ) if image: message_embed.set_image(url=image) extra_message = None if ping or image: ping_role = ( channel_config.ping if channel_config and channel_config.ping else config.ping ) extra_message = await interaction.channel.send( ping_role.mention if ping_role else "", embed=message_embed if message or image else None, # type: ignore allowed_mentions=discord.AllowedMentions(roles=True), ) if extra_message is None and message is not None: extra_message = await interaction.channel.send(embed=message_embed) await interaction.client.timer_cog.create_timer( message_id=giveaway_message.id, channel_id=interaction.channel.id, guild_id=interaction.guild.id, author_id=interaction.user.id, title="Giveaway", event="giveaway", expires=duration, pool=interaction.client.pool, ) return await cls.create_entry( bot=interaction.client, guild_id=interaction.guild.id, channel_id=interaction.channel.id, message_id=giveaway_message.id, prize=prize, host_id=interaction.user.id, donor_id=donor.id if donor else None, winner_count=winners, ends=duration, required_roles=[role.id for role in required_roles if role is not None] if required_roles else [], blacklisted_roles=[ role.id for role in blacklisted_roles if role is not None ] if blacklisted_roles else [], bypass_roles=[role.id for role in bypass_roles if role is not None] if bypass_roles else [], multiplier_roles={ role.id: entries for role, entries in multiplier_roles.items() if role is not None } if multiplier_roles else {}, messages={}, messages_required=messages_required, allowed_message_channels=[c.id for c in allowed_message_channels] if allowed_message_channels else [], extra_message_id=extra_message.id if extra_message else None, amari=amari, weekly_amari=weekly_amari, ) @classmethod async def create_entry( cls, bot: Giftify, guild_id: int, channel_id: int, message_id: int, prize: str, host_id: int, winner_count: int, ends: datetime.datetime, required_roles: List[int], blacklisted_roles: List[int], bypass_roles: List[int], donor_id: Optional[int], multiplier_roles: Optional[dict], messages: Optional[dict], messages_required: Optional[int], allowed_message_channels: Optional[List[int]], extra_message_id: Optional[int], amari: Optional[int], weekly_amari: Optional[int], ) -> "Giveaway": """ Create a new Giveaway object and insert it into the database. Parameters ---------- bot: Giftify The bot instance. guild_id: int The ID of the guild (server) where the giveaway is hosted. channel_id: int The ID of the channel where the giveaway is hosted. message_id: int The ID of the message having the giveaway view. prize: str The prize of the giveaway. host_id: int The ID of the user hosting the giveaway. donor_id: int The ID of the donor of the giveaway. winner_count: int The number of winners for the giveaway. 
ends: datetime.datetime The time when the giveaway ends. required_roles: List[int] The list of role IDs required to participate in the giveaway. blacklisted_roles: List[int] The list of role IDs excluded from participating in the giveaway. bypass_roles: List[int] The list of user IDs exempted from giveaway restrictions. multiplier_roles: Optional[dict] A dictionary containing multiplier_roles criteria for the giveaway. messages: Optional[dict] A dictionary containing message-based criteria for the giveaway. messages_required: Optional[int] The number of messages required to participate in the giveaway. allowed_message_channels: Optional[int] The ID of the channel where the message count is tracked. amari: Optional[int] The required Amari XP to participate in the giveaway. weekly_amari: Optional[int] The required weekly Amari XP to participate in the giveaway. Returns ------- Giveaway The created Giveaway object. """ record = await bot.pool.fetchrow( "INSERT INTO giveaways (guild, channel, message, extra_message, host, donor, prize, winner_count, ends, required_roles, blacklisted_roles, bypass_roles, multiplier_roles, messages, messages_required, messages_channel, amari, weekly_amari) " "VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18) " "RETURNING *", guild_id, channel_id, message_id, extra_message_id, host_id, donor_id, prize, winner_count, ends, required_roles, blacklisted_roles, bypass_roles, multiplier_roles, messages, messages_required, allowed_message_channels, amari, weekly_amari, ) return cls(bot=bot, record=record) async def check_requirements(self, member: discord.Member) -> None: missing_roles = [ role.mention for role_id in self.required_roles if (role := member.guild.get_role(role_id)) and role not in member.roles ] if missing_roles:
raise GiveawayError(
3
2023-11-09 15:00:15+00:00
12k
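The record above ends with four short trailing values: what reads as the target continuation line ("raise GiveawayError(", which directly follows the truncated code shown), a snippet index (3), a UTC creation timestamp, and a size bucket ("12k"). As a rough illustration only, the sketch below shows one way such a row tail could be held in a typed container; the field names used here (next_line, gold_snippet_index, created_at, level) are assumptions inferred from the layout of these rows, not names the dump itself states.

from dataclasses import dataclass
from datetime import datetime

@dataclass
class CompletionRowTail:
    """Trailing values of one row; field names are assumed for illustration."""
    next_line: str            # ground-truth continuation, e.g. "raise GiveawayError("
    gold_snippet_index: int   # index of the supporting context snippet (assumed meaning)
    created_at: datetime      # e.g. 2023-11-09 15:00:15+00:00
    level: str                # size bucket string as displayed, e.g. "12k"

# Values copied from the record that closes above.
tail = CompletionRowTail(
    next_line="raise GiveawayError(",
    gold_snippet_index=3,
    created_at=datetime.fromisoformat("2023-11-09 15:00:15+00:00"),
    level="12k",
)
print(tail.next_line, tail.level)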
Zjy0401/CoCoFormer
train.py
[ { "identifier": "create_jsf_datasets", "path": "dataset/jsf.py", "snippet": "def create_jsf_datasets(dataset_root, max_seq, random_seq=True):\n\n train_root = os.path.join(dataset_root, \"train\")\n # val_root = os.path.join(dataset_root, \"val\")\n test_root = os.path.join(dataset_root, \"test\")\n\n train_dataset = MultiJSFDataset(train_root, max_seq, random_seq)\n # val_dataset = JSFDataset(val_root, max_seq, random_seq)\n test_dataset = MultiJSFDataset(test_root, max_seq, random_seq)\n\n return train_dataset, test_dataset" }, { "identifier": "CoCoformer", "path": "model/CoCoFormer.py", "snippet": "class CoCoformer(nn.Module):\n\n def __init__(self, word2event, event2word, n_layers=6, num_heads=8, d_model=512, dim_feedforward=1024,\n dropout=0.1, max_sequence=2048, c_max_seq=256, b_max_seq=1024, rpr=False):\n super(CoCoformer, self).__init__()\n\n self.dummy = DummyDecoder()\n self.nlayers = n_layers\n self.nhead = num_heads\n self.d_model = d_model\n self.d_ff = dim_feedforward\n self.dropout = dropout\n self.max_seq = max_sequence\n self.c_max_seq = c_max_seq\n self.b_max_seq = b_max_seq\n self.rpr = rpr\n # word2event and event2word:\n self.word2event = word2event\n self.event2word = event2word\n\n # past layer of chord\n self.cpast_layer_dmodel = d_model\n self.cpast_layer_nhead = 8\n self.cpast_dim_forward = 256\n self.cpast_layer_max_seq = 256\n self.cpast_layer_nlayers = 1\n\n # past layer of beats\n self.bpast_layer_dmodel = d_model\n self.bpast_layer_nhead = 8\n self.bpast_dim_forward = 256\n self.bpast_layer_max_seq = 1024\n self.bpast_layer_nlayers = 1\n\n # Input embedding\n self.n_embedding = nn.Embedding(VOCAB_SIZE, self.d_model)\n self.c_embedding = nn.Embedding(VOCAB_SIZE, self.cpast_layer_dmodel)\n self.b_embedding = nn.Embedding(VOCAB_SIZE, self.bpast_layer_dmodel)\n # Positional encoding\n self.n_positional_encoding = PositionalEncoding(self.d_model, self.dropout, self.max_seq)\n self.c_positional_encoding = PositionalEncoding(self.cpast_layer_dmodel, self.dropout, self.cpast_layer_max_seq)\n self.b_positional_encoding = PositionalEncoding(self.bpast_layer_dmodel, self.dropout, self.bpast_layer_max_seq)\n\n # Base transformer\n if not self.rpr:\n # To make a decoder-only transformer we need to use masked encoder layers\n # Dummy decoder to essentially just return the encoder output\n encoder_norm = LayerNorm(self.d_model)\n encoder_past_layer = TransformerEncoderPastLayer(self.cpast_layer_dmodel, self.cpast_layer_nhead,\n self.cpast_dim_forward, self.bpast_layer_dmodel,\n self.bpast_layer_nhead, self.bpast_dim_forward,\n self.d_model, self.nhead,\n self.d_ff, self.dropout)\n encoder_layer = TransformerEncoderLayer(self.d_model, self.nhead, self.d_ff, self.dropout)\n encoder = TransformerEncoder(encoder_layer, self.nlayers, encoder_past_layer, self.max_seq, self.c_max_seq,\n self.b_max_seq, encoder_norm)\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_encoder=encoder, custom_decoder=self.dummy\n )\n # RPR Transformer\n elif self.rpr:\n encoder_norm = LayerNorm(self.d_model)\n encoder_layer = TransformerEncoderLayerRPR(self.d_model, self.nhead, self.d_ff, self.dropout,\n er_len=self.max_seq)\n encoder_past_layer = TransformerEncoderLayerRPR_(self.cpast_layer_dmodel, self.cpast_layer_nhead,\n self.cpast_dim_forward, self.bpast_layer_dmodel,\n self.bpast_layer_nhead, self.bpast_dim_forward,\n 
self.d_model, self.nhead,\n self.d_ff, self.dropout, er_len=self.max_seq)\n encoder = TransformerEncoderRPR(encoder_layer, self.nlayers, encoder_past_layer, self.max_seq,\n self.c_max_seq, self.b_max_seq, encoder_norm)\n\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_decoder=self.dummy, custom_encoder=encoder\n )\n\n # Final output is a softmaxed linear layer\n # TODO: verify the size of linear\n self.Norm1 = nn.LayerNorm(1024)\n self.ReLU = nn.ReLU()\n self.Norm2 = nn.LayerNorm(181)\n self.Dropout = nn.Dropout(dropout)\n self.transLinear = nn.Linear(256, 256)\n self.Wout1 = nn.Linear(self.d_model, 1024)\n self.Wout2 = nn.Linear(1024, 1024)\n self.Wout3 = nn.Linear(1024, VOCAB_SIZE)\n self.softmax = nn.Softmax(dim=-1)\n\n def _reset_parameters(self):\n r\"\"\"Initiate parameters in the transformer model.\"\"\"\n\n for p in self.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n\n # forward\n def forward(self, x1, x2, x3, mask=True):\n\n args = parse_train_args()\n # for pure-Transformer:\n # Transformer module:\n if mask is True:\n if args.gpu[0] != -1:\n mask = self.transformer.generate_square_subsequent_mask(x1.shape[1]).cuda(device=args.gpu[0])\n else:\n mask = self.transformer.generate_square_subsequent_mask(x1.shape[1]).cpu()\n else:\n mask = None\n # Input shape is (max_seq, batch_size, d_model)\n x_n = self.n_embedding(x1)\n x_n = x_n.permute(1, 0, 2)\n x_n = self.n_positional_encoding(x_n)\n\n x_c = self.c_embedding(x2)\n x_c = x_c.permute(1, 0, 2)\n x_c = self.c_positional_encoding(x_c)\n\n x_b = self.b_embedding(x3)\n x_b = x_b.permute(1, 0, 2)\n x_b = self.b_positional_encoding(x_b)\n\n # Since there are no true decoder layers, the tgt is unused\n # Pytorch wants src and tgt to have some equal dims however\n x_out = self.transformer(src=torch.cat((x_n, x_c, x_b), dim=0), tgt=x_n,\n src_mask=mask)\n # x_out = self.transformer(src=x_transformer, tgt=x_transformer, src_mask=mask)\n # Back to (batch_size, max_seq, d_model)\n x_out = x_out.permute(1, 0, 2)\n\n # concat\n # x_concat = torch.cat([x_out, x_out2], dim=1)\n y = self.Dropout(self.Norm1(self.ReLU(self.Wout1(x_out))))\n y = self.Dropout(self.Norm1(self.ReLU(self.Wout2(y))))\n y = self.Wout3(y)\n # y = self.Wout2(y)\n # y = self.softmax(y)\n\n del mask\n\n # They are trained to predict the next note in sequence (we don't need the last one)\n return y\n\n # unconditional generate\n def generate(self, primer=None, target_seq_length=1024, beam=0, beam_chance=1.0):\n\n assert (not self.training), \"Cannot generate while in training mode\"\n\n print(\"Generating sequence of max length:\", target_seq_length)\n\n gen_seq = torch.full((1, target_seq_length), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n\n num_primer = len(primer)\n gen_seq[..., :num_primer] = primer.type(TORCH_LABEL_TYPE).to(get_device())\n\n # print(\"primer:\",primer)\n # print(gen_seq)\n cur_i = num_primer\n while cur_i < target_seq_length:\n # gen_seq_batch = gen_seq.clone()\n y = self.softmax(self.forward(gen_seq[..., :cur_i]))[..., :len(self.word2event)]\n token_probs = y[:, cur_i - 1, :]\n\n if beam == 0:\n beam_ran = 2.0\n else:\n beam_ran = random.uniform(0, 1)\n\n if beam_ran <= beam_chance:\n token_probs = token_probs.flatten()\n top_res, top_i = torch.topk(token_probs, beam)\n\n beam_rows = top_i // VOCAB_SIZE\n beam_cols = top_i % VOCAB_SIZE\n\n gen_seq = 
gen_seq[beam_rows, :]\n gen_seq[..., cur_i] = beam_cols\n\n else:\n distrib = torch.distributions.categorical.Categorical(probs=token_probs)\n next_token = distrib.sample()\n # print(\"next token:\",next_token)\n gen_seq[:, cur_i] = next_token\n\n # Let the transformer decide to end if it wants to\n # if next_token == TOKEN_END:\n # print(\"Model called end of sequence at:\", cur_i, \"/\", target_seq_length)\n # break\n\n cur_i += 1\n if cur_i % 50 == 0:\n print(cur_i, \"/\", target_seq_length)\n\n return gen_seq[:, :cur_i]\n\n # conditional generate\n def conditional_generate(self, beats, chord, seq, c, bs, ba, bt, bb, target_seq_length=1024, beam=0, beam_chance=1.0):\n\n assert (not self.training), \"Cannot generate while in training mode\"\n print(\"Generating sequence of max length:\", target_seq_length)\n chord = torch.tensor(chord, device=get_device()).unsqueeze(0)\n beats = torch.tensor(beats, device=get_device()).unsqueeze(0)\n\n gen_seq = torch.full((1, target_seq_length), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n primer = torch.tensor([c[0], bs[0], seq[0], ba[0]])\n primer_num = 1 # decide key to add\n num_primer = len(primer)\n gen_seq[..., :num_primer] = primer.type(TORCH_LABEL_TYPE).to(get_device())\n\n # print(\"primer:\",primer)\n # print(gen_seq)\n cur_i = num_primer\n # first input: C B N B\n cur_i_n = 1\n cur_i_b = 2\n cur_i_c = 1\n check_error = 0\n pbar = tqdm(total=len(seq)*9)\n while cur_i < target_seq_length:\n a = gen_seq[..., :cur_i].cpu().numpy()\n # gen_seq_batch = gen_seq.clone()\n # print(\"input:\", gen_seq[..., :cur_i], chord[..., :cur_i_c], beats[..., :cur_i_b])\n y = self.softmax(self.forward(gen_seq[..., :cur_i], chord[..., :cur_i_c],\n beats[..., :cur_i_b]))[..., :len(self.word2event)]\n token_probs = y[:, cur_i - 1, :]\n # check for y\n distrib = torch.distributions.categorical.Categorical(probs=token_probs)\n next_token = distrib.sample()\n if check_error > 256:\n print(\"error! 
regenerate!\")\n return False\n # next token is the next token\n if cur_i % 9 == 1: # token is chord, next token must be beats\n if not 178 < next_token < 191: # if it is not beat\n check_error += 1\n continue\n if cur_i % 9 in [2, 4, 6, 8]: # this token must be beat, next token must be note\n if not next_token < 129: # if it is not note\n check_error += 1\n continue\n else: # this token must be note, next token must be chord or beat\n if not 128 < next_token < 191: # if it is chord or beat\n check_error += 1\n continue\n\n if beam == 0:\n beam_ran = 2.0\n else:\n beam_ran = random.uniform(0, 1)\n\n if beam_ran <= beam_chance:\n token_probs = token_probs.flatten()\n top_res, top_i = torch.topk(token_probs, beam)\n\n beam_rows = top_i // VOCAB_SIZE\n beam_cols = top_i % VOCAB_SIZE\n\n gen_seq = gen_seq[beam_rows, :]\n gen_seq[..., cur_i] = beam_cols\n\n else:\n # print(\"next token:\",next_token)\n gen_seq[:, cur_i] = next_token\n cur_i += 1\n pbar.update(1)\n cur_i_n += 1\n if cur_i % 9 == 0 and primer_num < len(seq):\n # add C B_S N_S B_A\n gen_seq[:, cur_i] = chord.squeeze()[primer_num]\n gen_seq[:, cur_i+1] = torch.tensor(bs[primer_num], device=get_device())\n gen_seq[:, cur_i+2] = torch.tensor(seq[primer_num], device=get_device())\n gen_seq[:, cur_i+3] = torch.tensor(ba[primer_num], device=get_device())\n primer_num += 1\n cur_i += 4\n pbar.update(4)\n cur_i_n += 1\n cur_i_b += 2\n cur_i_c += 1\n # a = gen_seq[..., :cur_i].cpu().numpy()\n if cur_i % 9 != 0 and cur_i % 9 != 4 and primer_num < len(seq) + 1:\n # add B\n gen_seq[:, cur_i] = beats.squeeze()[cur_i_b]\n cur_i_b += 1\n cur_i_n += 1\n cur_i += 1\n pbar.update(1)\n # a = gen_seq[..., :cur_i].cpu().numpy()\n if primer_num == len(seq) and cur_i == len(seq) * 9:\n print(\"Model called end of sequence at:\", cur_i, \"/\", target_seq_length)\n break\n # print(cur_i, \"/\", target_seq_length)\n\n print(\"all errors:%d\" % check_error)\n return gen_seq[:, :cur_i]" }, { "identifier": "Discriminator", "path": "model/CoCoFormer.py", "snippet": "class Discriminator(nn.Module):\n \"\"\"\n to judge the true sample or fake\n return fake or true\n \"\"\"\n def __init__(self, input_emb=1, d_model=256, nhead=4, d_ff=512, dropout=0.5, out_emb=1024):\n super(Discriminator, self).__init__()\n self.linear1 = nn.Linear(input_emb, d_model)\n self.transformer = TransformerEncoderLayer(d_model, nhead, d_ff, dropout)\n self.linear2 = nn.Linear(d_model, out_emb)\n self.relu = nn.LeakyReLU(negative_slope=0.01, inplace=False)\n self.maxpool = nn.AdaptiveMaxPool1d(1)\n self.Norm1 = nn.LayerNorm(d_model)\n self.Norm2 = nn.LayerNorm(out_emb)\n self.dropout = nn.Dropout(dropout)\n self.sigmoid = nn.Sigmoid()\n self.loss = nn.BCELoss()\n\n def forward(self, x, labels):\n x = x.float().unsqueeze(2)\n x = self.dropout(self.Norm1(self.linear1(x)))\n x = self.transformer(x)\n logits = self.dropout(self.Norm2(self.linear2(x)))\n logits = self.sigmoid(self.relu(self.maxpool(logits)))\n logits = logits.reshape(logits.shape[0] * logits.shape[1], -1)\n labels = labels.reshape(logits.shape[0] * logits.shape[1], -1)\n loss = self.loss(logits, labels)\n\n # import numpy as np\n # logits = logits.cpu().detach().numpy()\n # labels = labels.cpu().detach().numpy()\n # loss = []\n # for i in logits:\n # loss.append(np.log(1-1/(1+np.exp(i[0]))))\n output = (loss, logits)\n\n return output\n\n def _reset_parameters(self):\n\n for p in self.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)" }, { "identifier": "PureTransformer", "path": "model/CoCoFormer.py", "snippet": "class 
PureTransformer(nn.Module):\n\n def __init__(self, word2event, event2word, n_layers=6, num_heads=8, d_model=512, dim_feedforward=1024,\n dropout=0.1, max_sequence=2048, c_max_seq=256, b_max_seq=1024, rpr=False):\n super(PureTransformer, self).__init__()\n self.dummy = DummyDecoder()\n self.nlayers = n_layers\n self.nhead = num_heads\n self.d_model = d_model\n self.d_ff = dim_feedforward\n self.dropout = dropout\n self.max_seq = max_sequence\n self.rpr = rpr\n # word2event and event2word:\n self.word2event = word2event\n self.event2word = event2word\n # Input embedding\n self.embedding = nn.Embedding(VOCAB_SIZE, self.d_model)\n\n # Positional encoding\n self.positional_encoding = PositionalEncoding(self.d_model, self.dropout, self.max_seq)\n\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_decoder=self.dummy\n )\n\n # Final output is a softmaxed linear layer\n self.Wout = nn.Linear(self.d_model, VOCAB_SIZE)\n self.softmax = nn.Softmax(dim=-1)\n\n # forward\n def forward(self, x, mask=True):\n\n if mask is True:\n mask = self.transformer.generate_square_subsequent_mask(x[0].shape[1]).to(get_device())\n else:\n mask = None\n\n x = self.embedding(x)\n\n # Input shape is (max_seq, batch_size, d_model)\n x = x.permute(1, 0, 2)\n\n x = self.positional_encoding(x)\n\n # Since there are no true decoder layers, the tgt is unused\n # Pytorch wants src and tgt to have some equal dims however\n x_out = self.transformer(src=x, tgt=x, src_mask=mask)\n\n # Back to (batch_size, max_seq, d_model)\n x_out = x_out.permute(1, 0, 2)\n\n y = self.Wout(x_out)\n # y = self.softmax(y)\n\n del mask\n\n # They are trained to predict the next note in sequence (we don't need the last one)\n return y" }, { "identifier": "SmoothCrossEntropyLoss", "path": "model/loss.py", "snippet": "class SmoothCrossEntropyLoss(_Loss):\n \"\"\"\n https://arxiv.org/abs/1512.00567\n \"\"\"\n __constants__ = ['label_smoothing', 'vocab_size', 'ignore_index', 'reduction']\n\n def __init__(self, label_smoothing, vocab_size, ignore_index=-100, reduction='mean', is_logits=True):\n assert 0.0 <= label_smoothing <= 1.0\n super().__init__(reduction=reduction)\n\n self.label_smoothing = label_smoothing\n self.vocab_size = vocab_size\n self.ignore_index = ignore_index\n self.input_is_logits = is_logits\n\n def forward(self, input, target):\n \"\"\"\n Args:\n input: [B * T, V]\n target: [B * T]\n Returns:\n cross entropy: [1]\n \"\"\"\n mask = (target == self.ignore_index).unsqueeze(-1)\n q = F.one_hot(target.long(), self.vocab_size).type(torch.float32)\n u = 1.0 / self.vocab_size\n q_prime = (1.0 - self.label_smoothing) * q + self.label_smoothing * u\n q_prime = q_prime.masked_fill(mask, 0)\n\n ce = self.cross_entropy_with_logits(q_prime, input)\n if self.reduction == 'mean':\n lengths = torch.sum(target != self.ignore_index)\n return ce.sum() / lengths\n elif self.reduction == 'sum':\n return ce.sum()\n else:\n raise NotImplementedError\n\n def cross_entropy_with_logits(self, p, q):\n return -torch.sum(p * (q - q.logsumexp(dim=-1, keepdim=True)), dim=-1)" }, { "identifier": "get_device", "path": "utilities/device.py", "snippet": "def get_device():\n\n if((not USE_CUDA) or (TORCH_CUDA_DEVICE is None)):\n return TORCH_CPU_DEVICE\n else:\n return TORCH_CUDA_DEVICE" }, { "identifier": "use_cuda", "path": "utilities/device.py", "snippet": "def use_cuda(cuda_bool):\n\n global 
USE_CUDA\n USE_CUDA = cuda_bool" }, { "identifier": "LrStepTracker", "path": "utilities/lr_scheduling.py", "snippet": "class LrStepTracker:\n\n def __init__(self, model_dim=512, warmup_steps=4000, init_steps=0):\n # Store Values\n self.warmup_steps = warmup_steps\n self.model_dim = model_dim\n self.init_steps = init_steps\n\n # Begin Calculations\n self.invsqrt_dim = (1 / math.sqrt(model_dim))\n self.invsqrt_warmup = (1 / (warmup_steps * math.sqrt(warmup_steps)))\n\n # step\n def step(self, step):\n\n step += self.init_steps\n if(step <= self.warmup_steps):\n return self.invsqrt_dim * self.invsqrt_warmup * step\n else:\n invsqrt_step = (1 / math.sqrt(step))\n return self.invsqrt_dim * invsqrt_step" }, { "identifier": "get_lr", "path": "utilities/lr_scheduling.py", "snippet": "def get_lr(optimizer):\n\n for param_group in optimizer.param_groups:\n return param_group['lr']" }, { "identifier": "parse_train_args", "path": "utilities/argument_funcs.py", "snippet": "def parse_train_args():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-input_dir\", type=str, default=\"./dataset/dataset/JSF_SATB\", help=\"Folder of preprocessed and pickled midi files\")\n parser.add_argument(\"-output_dir\", type=str, default=\"./baseline_3loss\", help=\"Folder to save model weights. Saves one every epoch\")\n parser.add_argument(\"-weight_modulus\", type=int, default=1, help=\"How often to save epoch weights (ex: value of 10 means save every 10 epochs)\")\n parser.add_argument(\"-print_modulus\", type=int, default=1, help=\"How often to print train results for a batch (batch loss, learn rate, etc.)\")\n parser.add_argument(\"-word2event\", type=str, default='./dataset/word2event.pkl', help='word table location: *.pkl')\n parser.add_argument(\"-n_workers\", type=int, default=2, help=\"Number of threads for the dataloader\")\n parser.add_argument(\"--force_cpu\", action=\"store_true\", help=\"Forces model to run on a cpu even when gpu is available\")\n parser.add_argument(\"--gpu\", default=[2], nargs='+', type=int, help=\"For Multi-GPUs training\")\n parser.add_argument(\"--no_tensorboard\", action=\"store_true\", help=\"Turns off tensorboard result reporting\")\n parser.add_argument('--scheduled_sampling', default=False, help='False means use teacher forcing, True means use scheduled_sampling')\n parser.add_argument(\"--scheduled_sampling_change_ratio\", default=0.5, type=int, help='ratio about mix golden target with output')\n parser.add_argument(\"-continue_weights\", type=str, default=None, help=\"Model weights to continue training based on\")\n parser.add_argument(\"-continue_epoch\", type=int, default=None, help=\"Epoch the continue_weights model was at\")\n\n parser.add_argument(\"-lr\", type=float, default=None, help=\"Constant learn rate. 
Leave as None for a custom scheduler.\")\n parser.add_argument(\"-ce_smoothing\", type=float, default=None, help=\"Smoothing parameter for smoothed cross entropy loss (defaults to no smoothing)\")\n parser.add_argument(\"-batch_size\", type=int, default=2, help=\"Batch size per gpu to use\")\n parser.add_argument(\"-epochs\", type=int, default=300, help=\"Number of epochs to use\")\n\n parser.add_argument(\"-adv_train\", default=True, help='add discriminator loss')\n parser.add_argument(\"-only_Transformer\", default=False, help='use pure Transformer, default set to false, True only for test')\n parser.add_argument(\"-loss\", default=[0.4, 0.2, 0.8], nargs='+', type=float, help='weights of loss, the last element effect when adv train is True')\n\n parser.add_argument(\"--rpr\", action=\"store_true\", help=\"Use a modified Transformer for Relative Position Representations\")\n parser.add_argument(\"-max_sequence\", type=int, default=2048, help=\"Maximum midi sequence to consider\")\n parser.add_argument(\"-n_layers\", type=int, default=6, help=\"Number of decoder layers to use\")\n parser.add_argument(\"-num_heads\", type=int, default=8, help=\"Number of heads to use for multi-head attention\")\n parser.add_argument(\"-d_model\", type=int, default=512, help=\"Dimension of the model (output dim of embedding layers, etc.)\")\n\n parser.add_argument(\"-dim_feedforward\", type=int, default=1024, help=\"Dimension of the feedforward layer\")\n\n parser.add_argument(\"-dropout\", type=float, default=0.1, help=\"Dropout rate\")\n\n parser.add_argument(\"--metrics\", default=False, help=\"evaluate TER(token error rate)\")\n\n return parser.parse_args()" }, { "identifier": "print_train_args", "path": "utilities/argument_funcs.py", "snippet": "def print_train_args(args):\n\n print(SEPERATOR)\n print(\"input_dir:\", args.input_dir)\n print(\"output_dir:\", args.output_dir)\n print(\"weight_modulus:\", args.weight_modulus)\n print(\"print_modulus:\", args.print_modulus)\n print(\"\")\n print(\"n_workers:\", args.n_workers)\n print(\"force_cpu:\", args.force_cpu)\n print(\"tensorboard:\", not args.no_tensorboard)\n print(\"\")\n print(\"continue_weights:\", args.continue_weights)\n print(\"continue_epoch:\", args.continue_epoch)\n print(\"\")\n print(\"lr:\", args.lr)\n print(\"ce_smoothing:\", args.ce_smoothing)\n print(\"batch_size:\", args.batch_size)\n print(\"epochs:\", args.epochs)\n print(\"\")\n print(\"rpr:\", args.rpr)\n print(\"max_sequence:\", args.max_sequence)\n print(\"n_layers:\", args.n_layers)\n print(\"num_heads:\", args.num_heads)\n print(\"d_model:\", args.d_model)\n print(\"\")\n print(\"dim_feedforward:\", args.dim_feedforward)\n print(\"dropout:\", args.dropout)\n print(SEPERATOR)\n print(\"\")" }, { "identifier": "write_model_params", "path": "utilities/argument_funcs.py", "snippet": "def write_model_params(args, output_file):\n\n o_stream = open(output_file, \"w\")\n\n o_stream.write(\"rpr: \" + str(args.rpr) + \"\\n\")\n o_stream.write(\"lr: \" + str(args.lr) + \"\\n\")\n o_stream.write(\"ce_smoothing: \" + str(args.ce_smoothing) + \"\\n\")\n o_stream.write(\"batch_size: \" + str(args.batch_size) + \"\\n\")\n o_stream.write(\"max_sequence: \" + str(args.max_sequence) + \"\\n\")\n o_stream.write(\"n_layers: \" + str(args.n_layers) + \"\\n\")\n o_stream.write(\"num_heads: \" + str(args.num_heads) + \"\\n\")\n o_stream.write(\"d_model: \" + str(args.d_model) + \"\\n\")\n o_stream.write(\"dim_feedforward: \" + str(args.dim_feedforward) + \"\\n\")\n o_stream.write(\"dropout: \" + 
str(args.dropout) + \"\\n\")\n\n o_stream.close()" }, { "identifier": "train_epoch", "path": "utilities/run_model.py", "snippet": "def train_epoch(cur_epoch, model, dataloader, loss, opt, lr_scheduler=None, print_modulus=1):\n\n args = parse_train_args()\n\n model.train()\n for batch_num, batch in enumerate(dataloader):\n time_before = time.time()\n\n opt.zero_grad()\n x = batch[0]\n tgt = batch[1]\n for i in range(len(batch[0])):\n if args.gpu[0] != -1:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cuda(device=args.gpu[0])\n if isinstance(x[i], torch.Tensor):\n x[i] = x[i].cuda(device=args.gpu[0])\n\n if isinstance(tgt[i], list):\n for j in range(len(tgt[i])):\n tgt[i][j] = tgt[i][j].cuda(device=args.gpu[0])\n if isinstance(tgt[i], torch.Tensor):\n tgt[i] = tgt[i].cuda(device=args.gpu[0])\n else:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cpu()\n tgt[i][j] = tgt[i][j].cpu()\n tgt = tgt[0][0]\n tgt = tgt.flatten()\n # with torch.no_grad():\n # y1 = model(x[1])\n # y1 = y1.reshape(y1.shape[0] * y1.shape[1], -1)\n # loss1 = loss.forward(y1, tgt)\n y2 = model(x[0])\n # y3 = model(x[2])\n # train for only CT\n # y = model(x)\n\n y2 = y2.reshape(y2.shape[0] * y2.shape[1], -1)\n loss2 = loss.forward(y2, tgt)\n # tgt = tgt.flatten()\n # add scheduled sampling\n # out = loss.forward(y, tgt)\n\n loss2.backward()\n # out = args.loss[0] * loss1 + args.loss[1] * loss2\n\n opt.step()\n if lr_scheduler is not None:\n lr_scheduler.step()\n\n time_after = time.time()\n time_took = time_after - time_before\n\n if (batch_num + 1) % print_modulus == 0:\n print(\"Epoch\", cur_epoch, \" Batch\", batch_num + 1, \"/\", len(dataloader), \"LR:\", get_lr(opt),\n \"Train total loss:\", float(loss2),\n \"Time (s):\", time_took)\n\n return" }, { "identifier": "train_with_adv", "path": "utilities/run_model.py", "snippet": "def train_with_adv(cur_epoch, model, model_disc, dataloader, loss, opt, opt_disc,\n lr_scheduler=None, lr_disc_scheduler=None, print_modulus=1):\n\n args = parse_train_args()\n out = -1\n start_epoch = 5\n model.train()\n model_disc.train()\n for batch_num, batch in enumerate(dataloader):\n time_before = time.time()\n\n opt.zero_grad()\n opt_disc.zero_grad()\n x = batch[0]\n tgt = batch[1]\n for i in range(len(batch[0])):\n if args.gpu[0] != -1:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cuda(device=args.gpu[0])\n if isinstance(x[i], torch.Tensor):\n x[i] = x[i].cuda(device=args.gpu[0])\n\n if isinstance(tgt[i], list):\n for j in range(len(tgt[i])):\n tgt[i][j] = tgt[i][j].cuda(device=args.gpu[0])\n if isinstance(tgt[i], torch.Tensor):\n tgt[i] = tgt[i].cuda(device=args.gpu[0])\n else:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cpu()\n tgt[i][j] = tgt[i][j].cpu()\n tgt = tgt[0][0]\n tgt = tgt.flatten()\n with torch.no_grad():\n y1 = model.module(x[1][0], x[1][1], x[1][2])\n y1 = y1.reshape(y1.shape[0] * y1.shape[1], -1)\n loss1 = loss.forward(y1, tgt)\n y2 = model.module(x[0][0], x[0][1], x[0][2])\n # discriminator model loss:\n if args.gpu[0] != -1:\n real_disc_label = torch.ones(len(batch[0]), batch[1][0][0].shape[1], 1).to(args.gpu[0])\n fake_disc_label = torch.zeros(len(batch[0]), y2.shape[1], 1).to(args.gpu[0])\n else:\n real_disc_label = torch.ones(len(batch[0]), batch[1][0][0].shape[1], 1)\n fake_disc_label = torch.zeros(len(batch[0]), y2.shape[1], 1)\n\n softmax = nn.Softmax(dim=-1)\n d_fake_loss, d_fake_logits = model_disc(torch.argmax(softmax(y2), dim=-1), 
fake_disc_label)\n d_real_loss, d_real_logits = model_disc(batch[1][0][0], real_disc_label)\n loss3 = d_fake_loss + d_real_loss\n # y3 = model(x[2])\n # train for only CT\n # y = model(x)\n\n y2 = y2.reshape(y2.shape[0] * y2.shape[1], -1)\n loss2 = loss.forward(y2, tgt)\n # tgt = tgt.flatten()\n # add scheduled sampling\n # out = loss.forward(y, tgt)\n\n # out = loss3\n out = args.loss[0] * loss1 + args.loss[1] * loss2 + args.loss[2] * loss3\n\n out.backward()\n opt.step()\n opt_disc.step()\n if lr_scheduler is not None:\n lr_scheduler.step()\n if lr_disc_scheduler is not None:\n lr_disc_scheduler.step()\n\n time_after = time.time()\n time_took = time_after - time_before\n\n if (batch_num + 1) % print_modulus == 0:\n print(\"Epoch\", cur_epoch, \" Batch\", batch_num + 1, \"/\", len(dataloader), \"LR:\", get_lr(opt_disc),\n \"Train total loss:\", float(out), \"Train loss1:\", float(loss1), \"Train loss2:\", float(loss2),\n \"Train loss3:\", float(loss3), \"Time (s):\", time_took)\n\n return" }, { "identifier": "eval_model", "path": "utilities/run_model.py", "snippet": "def eval_model(model, dataloader, loss):\n\n model.eval()\n args = parse_train_args()\n avg_acc = -1\n avg_loss = -1\n with torch.set_grad_enabled(False):\n n_test = len(dataloader)\n sum_loss = 0.0\n sum_acc = 0.0\n for batch in tqdm.tqdm(dataloader):\n x = batch[0]\n tgt = batch[1]\n for i in range(len(batch[0])):\n if args.gpu[0] != -1:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cuda(device=args.gpu[0])\n if isinstance(x[i], torch.Tensor):\n x[i] = x[i].cuda(device=args.gpu[0])\n\n if isinstance(tgt[i], list):\n for j in range(len(tgt[i])):\n tgt[i][j] = tgt[i][j].cuda(device=args.gpu[0])\n if isinstance(tgt[i], torch.Tensor):\n tgt[i] = tgt[i].cuda(device=args.gpu[0])\n else:\n x[i] = x[i].cpu()\n tgt[i] = tgt[i].cpu()\n tgt = tgt[0][0]\n tgt = tgt.flatten()\n\n # with torch.no_grad():\n # y1 = model(x[0])\n # y1 = y1.reshape(y1.shape[0] * y1.shape[1], -1)\n # loss1 = loss.forward(y1, tgt)\n y2 = model.module(x[0][0], x[0][1], x[0][2])\n y2 = y2.reshape(y2.shape[0] * y2.shape[1], -1)\n loss2 = loss.forward(y2, tgt)\n out = loss2\n\n sum_acc += float(compute_jsf_accuracy(y2, tgt))\n\n # y = y.reshape(y.shape[0] * y.shape[1], -1)\n # tgt = tgt.flatten()\n\n # out = loss.forward(y, tgt)\n\n sum_loss += float(out)\n\n avg_loss = sum_loss / n_test\n avg_acc = sum_acc / n_test\n\n return avg_loss, avg_acc" }, { "identifier": "get_metrics", "path": "utilities/run_model.py", "snippet": "def get_metrics(model, dataloader):\n \"\"\"\n Calculate TER: token error rate\n \"\"\"\n args = parse_eval_args()\n model.eval()\n # TER\n with torch.set_grad_enabled(False):\n n_test = len(dataloader)\n c_acc, Ns_acc, Bs_acc, Na_acc, Ba_acc, Nt_acc, Bt_acc, Nb_acc, Bb_acc = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0\n ter = []\n for batch in tqdm.tqdm(dataloader):\n x = batch[0]\n tgt = batch[1]\n for i in range(len(batch[0])):\n if args.gpu[0] != -1:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cuda(device=args.gpu[0])\n if isinstance(x[i], torch.Tensor):\n x[i] = x[i].cuda(device=args.gpu[0])\n\n if isinstance(tgt[i], list):\n for j in range(len(tgt[i])):\n tgt[i][j] = tgt[i][j].cuda(device=args.gpu[0])\n if isinstance(tgt[i], torch.Tensor):\n tgt[i] = tgt[i].cuda(device=args.gpu[0])\n else:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cpu()\n tgt[i][j] = tgt[i][j].cpu()\n\n y = model.module(x[0][0], x[0][1], x[0][2])\n # TER\n 
ter.append(compute_jsf_ter(y, tgt))\n\n for i in ter:\n c_acc += i[0]\n Bs_acc += i[1]\n Ns_acc += i[2]\n Ba_acc += i[3]\n Na_acc += i[4]\n Bt_acc += i[5]\n Nt_acc += i[6]\n Bb_acc += i[7]\n Nb_acc += i[8]\n TER = [c_acc / n_test, Bs_acc / n_test, Ns_acc / n_test, Ba_acc / n_test, Na_acc / n_test,\n Bt_acc / n_test, Nt_acc / n_test, Bb_acc / n_test, Nb_acc / n_test]\n # clear nan , or np.mean will only be nan if one is nan\n return TER" }, { "identifier": "train_with_pure_transformer", "path": "utilities/run_model.py", "snippet": "def train_with_pure_transformer(cur_epoch, model, dataloader, loss, opt, lr_scheduler=None, print_modulus=1):\n\n args = parse_train_args()\n\n model.train()\n for batch_num, batch in enumerate(dataloader):\n time_before = time.time()\n\n opt.zero_grad()\n\n x = batch[0][0][0].to(args.gpu[0])\n tgt = batch[1][0][0].to(args.gpu[0])\n\n y = model(x)\n\n y = y.reshape(y.shape[0] * y.shape[1], -1)\n tgt = tgt.flatten()\n\n out = loss.forward(y, tgt)\n\n out.backward()\n opt.step()\n\n if lr_scheduler is not None:\n lr_scheduler.step()\n\n time_after = time.time()\n time_took = time_after - time_before\n\n if (batch_num + 1) % print_modulus == 0:\n print(\"Epoch\", cur_epoch, \" Batch\", batch_num + 1, \"/\", len(dataloader), \"LR:\", get_lr(opt),\n \"Train loss:\", float(out), \"Time (s):\", time_took)\n\n return" }, { "identifier": "params", "path": "utilities/run_model.py", "snippet": "def params(dataloader, model, model_disc):\n\n args = parse_train_args()\n model.eval()\n for batch_num, batch in enumerate(dataloader):\n flops, params = profile(model.module, (batch[0][0][0].cuda(args.gpu[0]),\n batch[0][0][1].cuda(args.gpu[0]),\n batch[0][0][2].cuda(args.gpu[0]))\n )\n print('flops:', flops, 'params:', params)\n break" } ]
import os
import csv
import shutil
import torch
import torch.nn as nn
import pickle
from thop import profile
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from torch.optim import Adam
from dataset.jsf import create_jsf_datasets
from model.CoCoFormer import CoCoformer, Discriminator, PureTransformer
from model.loss import SmoothCrossEntropyLoss
from utilities.constants import *
from utilities.device import get_device, use_cuda
from utilities.lr_scheduling import LrStepTracker, get_lr
from utilities.argument_funcs import parse_train_args, print_train_args, write_model_params
from utilities.run_model import train_epoch, train_with_adv, eval_model, get_metrics, train_with_pure_transformer, params
from tensorboardX import SummaryWriter
10,336
# from dataset.e_piano import create_epiano_datasets, compute_epiano_accuracy, split_train_test

CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss", "Train Accuracy", "Avg Eval loss", "Eval accuracy"]

# Baseline is an untrained epoch that we evaluate as a baseline loss and accuracy
BASELINE_EPOCH = -1


# main
def main():
    """
    ----------
    Author: Damon Gwinn
    ----------
    Entry point. Trains a model specified by command line arguments
    ----------
    """

    args = parse_train_args()
# from dataset.e_piano import create_epiano_datasets, compute_epiano_accuracy, split_train_test

CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss", "Train Accuracy", "Avg Eval loss", "Eval accuracy"]

# Baseline is an untrained epoch that we evaluate as a baseline loss and accuracy
BASELINE_EPOCH = -1


# main
def main():
    """
    ----------
    Author: Damon Gwinn
    ----------
    Entry point. Trains a model specified by command line arguments
    ----------
    """

    args = parse_train_args()
print_train_args(args)
10
2023-11-01 08:33:08+00:00
12k
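In the CoCoFormer record above, the truncated source ends at "args = parse_train_args()", the target line is "print_train_args(args)", and the gold index 10 points at the context snippet whose identifier is print_train_args, so the index appears to mark the snippet the completion depends on. Under that reading, and with the key and helper names below invented purely for illustration (the context entries do expose "identifier", "path", and "snippet" as shown), a next-line prompt for such a row could be assembled roughly like this:

def build_next_line_prompt(row: dict) -> str:
    """Assemble a completion prompt from one parsed row; key names are assumed."""
    # Pick the context snippet referenced by the gold index, e.g. print_train_args here.
    gold = row["context"][row["gold_snippet_index"]]
    parts = [
        "# Relevant snippet from " + gold["path"] + ":",
        gold["snippet"],
        "# Imports of the file being completed:",
        row["import_statement"],
        "# File content so far:",
        row["cropped_code"],
    ]
    return "\n\n".join(parts)

def matches_target(predicted_line: str, row: dict) -> bool:
    """Check a model's first emitted line against the stored target (e.g. print_train_args(args))."""
    return predicted_line.strip() == row["next_line"].strip()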
serl-robot/serl
serl/agents/vice/vice_learner.py
[ { "identifier": "batched_random_crop", "path": "serl/utils/augmentations.py", "snippet": "def batched_random_crop(key, obs, pixel_key, padding=4):\n imgs = obs[pixel_key]\n keys = jax.random.split(key, imgs.shape[0])\n imgs = jax.vmap(random_crop, (0, 0, None))(keys, imgs, padding)\n return obs.copy(add_or_replace={pixel_key: imgs})" }, { "identifier": "SACLearner", "path": "serl/agents/sac/sac_learner.py", "snippet": "class SACLearner(Agent):\n critic: TrainState\n target_critic: TrainState\n temp: TrainState\n tau: float\n discount: float\n target_entropy: float\n num_qs: int = struct.field(pytree_node=False)\n num_min_qs: Optional[int] = struct.field(\n pytree_node=False\n ) # See M in RedQ https://arxiv.org/abs/2101.05982\n backup_entropy: bool = struct.field(pytree_node=False)\n\n @classmethod\n def create(\n cls,\n seed: int,\n observation_space: gym.Space,\n action_space: gym.Space,\n actor_lr: float = 3e-4,\n critic_lr: float = 3e-4,\n temp_lr: float = 3e-4,\n hidden_dims: Sequence[int] = (256, 256),\n discount: float = 0.99,\n tau: float = 0.005,\n num_qs: int = 2,\n num_min_qs: Optional[int] = None,\n critic_dropout_rate: Optional[float] = None,\n critic_layer_norm: bool = False,\n target_entropy: Optional[float] = None,\n init_temperature: float = 1.0,\n backup_entropy: bool = True,\n ):\n \"\"\"\n An implementation of the version of Soft-Actor-Critic described in https://arxiv.org/abs/1812.05905\n \"\"\"\n\n action_dim = action_space.shape[-1]\n observations = observation_space.sample()\n actions = action_space.sample()\n\n if target_entropy is None:\n target_entropy = -action_dim / 2\n\n rng = jax.random.PRNGKey(seed)\n rng, actor_key, critic_key, temp_key = jax.random.split(rng, 4)\n\n actor_base_cls = partial(MLP, hidden_dims=hidden_dims, activate_final=True)\n actor_def = TanhNormal(actor_base_cls, action_dim)\n actor_params = actor_def.init(actor_key, observations)[\"params\"]\n actor = TrainState.create(\n apply_fn=actor_def.apply,\n params=actor_params,\n tx=optax.adam(learning_rate=actor_lr),\n )\n\n critic_base_cls = partial(\n MLP,\n hidden_dims=hidden_dims,\n activate_final=True,\n dropout_rate=critic_dropout_rate,\n use_layer_norm=critic_layer_norm,\n )\n critic_cls = partial(StateActionValue, base_cls=critic_base_cls)\n critic_def = Ensemble(critic_cls, num=num_qs)\n critic_params = critic_def.init(critic_key, observations, actions)[\"params\"]\n critic = TrainState.create(\n apply_fn=critic_def.apply,\n params=critic_params,\n tx=optax.adam(learning_rate=critic_lr),\n )\n\n target_critic_def = Ensemble(critic_cls, num=num_min_qs or num_qs)\n target_critic = TrainState.create(\n apply_fn=target_critic_def.apply,\n params=critic_params,\n tx=optax.GradientTransformation(lambda _: None, lambda _: None),\n )\n\n temp_def = Temperature(init_temperature)\n temp_params = temp_def.init(temp_key)[\"params\"]\n temp = TrainState.create(\n apply_fn=temp_def.apply,\n params=temp_params,\n tx=optax.adam(learning_rate=temp_lr),\n )\n\n return cls(\n rng=rng,\n actor=actor,\n critic=critic,\n target_critic=target_critic,\n temp=temp,\n target_entropy=target_entropy,\n tau=tau,\n discount=discount,\n num_qs=num_qs,\n num_min_qs=num_min_qs,\n backup_entropy=backup_entropy,\n )\n\n def update_actor(self, batch: DatasetDict) -> Tuple[Agent, Dict[str, float]]:\n key, rng = jax.random.split(self.rng)\n key2, rng = jax.random.split(rng)\n\n def actor_loss_fn(actor_params) -> Tuple[jnp.ndarray, Dict[str, float]]:\n dist = self.actor.apply_fn({\"params\": actor_params}, 
batch[\"observations\"])\n actions = dist.sample(seed=key)\n log_probs = dist.log_prob(actions)\n qs = self.critic.apply_fn(\n {\"params\": self.critic.params},\n batch[\"observations\"],\n actions,\n True,\n rngs={\"dropout\": key2},\n ) # training=True\n q = qs.mean(axis=0)\n actor_loss = (\n log_probs * self.temp.apply_fn({\"params\": self.temp.params}) - q\n ).mean()\n return actor_loss, {\"actor_loss\": actor_loss, \"entropy\": -log_probs.mean()}\n\n grads, actor_info = jax.grad(actor_loss_fn, has_aux=True)(self.actor.params)\n actor = self.actor.apply_gradients(grads=grads)\n\n return self.replace(actor=actor, rng=rng), actor_info\n\n def update_temperature(self, entropy: float) -> Tuple[Agent, Dict[str, float]]:\n def temperature_loss_fn(temp_params):\n temperature = self.temp.apply_fn({\"params\": temp_params})\n temp_loss = temperature * (entropy - self.target_entropy).mean()\n return temp_loss, {\n \"temperature\": temperature,\n \"temperature_loss\": temp_loss,\n }\n\n grads, temp_info = jax.grad(temperature_loss_fn, has_aux=True)(self.temp.params)\n temp = self.temp.apply_gradients(grads=grads)\n\n return self.replace(temp=temp), temp_info\n\n def update_critic(self, batch: DatasetDict) -> Tuple[TrainState, Dict[str, float]]:\n\n dist = self.actor.apply_fn(\n {\"params\": self.actor.params}, batch[\"next_observations\"]\n )\n\n rng = self.rng\n\n key, rng = jax.random.split(rng)\n next_actions = dist.sample(seed=key)\n\n # Used only for REDQ.\n key, rng = jax.random.split(rng)\n target_params = subsample_ensemble(\n key, self.target_critic.params, self.num_min_qs, self.num_qs\n )\n\n key, rng = jax.random.split(rng)\n next_qs = self.target_critic.apply_fn(\n {\"params\": target_params},\n batch[\"next_observations\"],\n next_actions,\n True,\n rngs={\"dropout\": key},\n ) # training=True\n next_q = next_qs.min(axis=0)\n\n target_q = batch[\"rewards\"] + self.discount * batch[\"masks\"] * next_q\n\n if self.backup_entropy:\n next_log_probs = dist.log_prob(next_actions)\n target_q -= (\n self.discount\n * batch[\"masks\"]\n * self.temp.apply_fn({\"params\": self.temp.params})\n * next_log_probs\n )\n\n key, rng = jax.random.split(rng)\n\n def critic_loss_fn(critic_params) -> Tuple[jnp.ndarray, Dict[str, float]]:\n qs = self.critic.apply_fn(\n {\"params\": critic_params},\n batch[\"observations\"],\n batch[\"actions\"],\n True,\n rngs={\"dropout\": key},\n ) # training=True\n critic_loss = ((qs - target_q) ** 2).mean()\n return critic_loss, {\"critic_loss\": critic_loss, \"q\": qs.mean()}\n\n grads, info = jax.grad(critic_loss_fn, has_aux=True)(self.critic.params)\n critic = self.critic.apply_gradients(grads=grads)\n\n target_critic_params = optax.incremental_update(\n critic.params, self.target_critic.params, self.tau\n )\n target_critic = self.target_critic.replace(params=target_critic_params)\n\n return self.replace(critic=critic, target_critic=target_critic, rng=rng), info\n\n @partial(jax.jit, static_argnames=\"utd_ratio\")\n def update(self, batch: DatasetDict, utd_ratio: int):\n\n new_agent = self\n for i in range(utd_ratio):\n\n def slice(x):\n assert x.shape[0] % utd_ratio == 0\n batch_size = x.shape[0] // utd_ratio\n return x[batch_size * i : batch_size * (i + 1)]\n\n mini_batch = jax.tree_util.tree_map(slice, batch)\n new_agent, critic_info = new_agent.update_critic(mini_batch)\n\n new_agent, actor_info = new_agent.update_actor(mini_batch)\n new_agent, temp_info = new_agent.update_temperature(actor_info[\"entropy\"])\n\n return new_agent, {**actor_info, **critic_info, 
**temp_info}" }, { "identifier": "DrQLearner", "path": "serl/agents/drq/drq_learner.py", "snippet": "class DrQLearner(SACLearner):\n data_augmentation_fn: Callable = struct.field(pytree_node=False)\n\n @classmethod\n def create(\n cls,\n seed: int,\n observation_space: gym.Space,\n action_space: gym.Space,\n actor_lr: float = 3e-4,\n critic_lr: float = 3e-4,\n temp_lr: float = 3e-4,\n cnn_features: Sequence[int] = (32, 32, 32, 32),\n cnn_filters: Sequence[int] = (3, 3, 3, 3),\n cnn_strides: Sequence[int] = (2, 1, 1, 1),\n cnn_padding: str = \"VALID\",\n latent_dim: int = 50,\n encoder: str = \"d4pg\",\n hidden_dims: Sequence[int] = (256, 256),\n discount: float = 0.99,\n tau: float = 0.005,\n num_qs: int = 2,\n num_min_qs: Optional[int] = None,\n critic_dropout_rate: Optional[float] = None,\n critic_layer_norm: bool = False,\n target_entropy: Optional[float] = None,\n init_temperature: float = 1.0,\n backup_entropy: bool = True,\n pixel_keys: Tuple[str, ...] = (\"pixels\",),\n depth_keys: Tuple[str, ...] = (),\n ):\n \"\"\"\n An implementation of the version of Soft-Actor-Critic described in https://arxiv.org/abs/1812.05905\n \"\"\"\n\n action_dim = action_space.shape[-1]\n observations = observation_space.sample()\n actions = action_space.sample()\n\n if target_entropy is None:\n target_entropy = -action_dim / 2\n\n rng = jax.random.PRNGKey(seed)\n rng, actor_key, critic_key, temp_key = jax.random.split(rng, 4)\n\n if encoder == \"d4pg\":\n encoder_cls = partial(\n D4PGEncoder,\n features=cnn_features,\n filters=cnn_filters,\n strides=cnn_strides,\n padding=cnn_padding,\n )\n elif encoder == \"resnet\":\n encoder_cls = partial(ResNetV2Encoder, stage_sizes=(2, 2, 2, 2))\n\n actor_base_cls = partial(MLP, hidden_dims=hidden_dims, activate_final=True)\n actor_cls = partial(TanhNormal, base_cls=actor_base_cls, action_dim=action_dim)\n actor_def = PixelMultiplexer(\n encoder_cls=encoder_cls,\n network_cls=actor_cls,\n latent_dim=latent_dim,\n stop_gradient=True,\n pixel_keys=pixel_keys,\n depth_keys=depth_keys,\n )\n actor_params = actor_def.init(actor_key, observations)[\"params\"]\n actor = TrainState.create(\n apply_fn=actor_def.apply,\n params=actor_params,\n tx=optax.adam(learning_rate=actor_lr),\n )\n\n critic_base_cls = partial(\n MLP,\n hidden_dims=hidden_dims,\n activate_final=True,\n dropout_rate=critic_dropout_rate,\n use_layer_norm=critic_layer_norm,\n )\n critic_cls = partial(StateActionValue, base_cls=critic_base_cls)\n critic_cls = partial(Ensemble, net_cls=critic_cls, num=num_qs)\n critic_def = PixelMultiplexer(\n encoder_cls=encoder_cls,\n network_cls=critic_cls,\n latent_dim=latent_dim,\n pixel_keys=pixel_keys,\n depth_keys=depth_keys,\n )\n critic_params = critic_def.init(critic_key, observations, actions)[\"params\"]\n critic = TrainState.create(\n apply_fn=critic_def.apply,\n params=critic_params,\n tx=optax.adam(learning_rate=critic_lr),\n )\n target_critic = TrainState.create(\n apply_fn=critic_def.apply,\n params=critic_params,\n tx=optax.GradientTransformation(lambda _: None, lambda _: None),\n )\n\n temp_def = Temperature(init_temperature)\n temp_params = temp_def.init(temp_key)[\"params\"]\n temp = TrainState.create(\n apply_fn=temp_def.apply,\n params=temp_params,\n tx=optax.adam(learning_rate=temp_lr),\n )\n\n def data_augmentation_fn(rng, observations):\n for pixel_key, depth_key in zip_longest(pixel_keys, depth_keys):\n key, rng = jax.random.split(rng)\n observations = batched_random_crop(key, observations, pixel_key)\n if depth_key is not None:\n observations = 
batched_random_crop(key, observations, depth_key)\n return observations\n\n return cls(\n rng=rng,\n actor=actor,\n critic=critic,\n target_critic=target_critic,\n temp=temp,\n target_entropy=target_entropy,\n tau=tau,\n discount=discount,\n num_qs=num_qs,\n num_min_qs=num_min_qs,\n backup_entropy=backup_entropy,\n data_augmentation_fn=data_augmentation_fn,\n )\n\n @partial(jax.jit, static_argnames=(\"utd_ratio\", \"pixel_keys\"))\n def update(self, batch: DatasetDict, utd_ratio: int, pixel_keys=(\"pixels\",)):\n '''\n Update the agent's parameters (actor and critic) using the batch of data from the replay buffer.\n We apply data augmentation to both observations and next_observation,\n then we share the encoder params between actor and critic.\n\n :param batch: a batch of data from the replay buffer, a dataset dict\n :param utd_ratio: the number of times to update the critic for each update of the actor\n :param pixel_keys: pixel keys to apply data augmentation to\n :return: the updated agent and the update info dict\n '''\n new_agent = self\n\n if pixel_keys[0] not in batch[\"next_observations\"]:\n batch = _unpack(batch)\n\n actor = _share_encoder(source=new_agent.critic, target=new_agent.actor)\n new_agent = new_agent.replace(actor=actor)\n\n rng, key = jax.random.split(new_agent.rng)\n observations = self.data_augmentation_fn(key, batch[\"observations\"])\n rng, key = jax.random.split(rng)\n next_observations = self.data_augmentation_fn(key, batch[\"next_observations\"])\n\n batch = batch.copy(\n add_or_replace={\n \"observations\": observations,\n \"next_observations\": next_observations,\n }\n )\n new_agent = new_agent.replace(rng=rng)\n return SACLearner.update(new_agent, batch, utd_ratio)" }, { "identifier": "Temperature", "path": "serl/agents/sac/temperature.py", "snippet": "class Temperature(nn.Module):\n initial_temperature: float = 1.0\n\n @nn.compact\n def __call__(self) -> jnp.ndarray:\n log_temp = self.param(\n \"log_temp\",\n init_fn=lambda key: jnp.full((), jnp.log(self.initial_temperature)),\n )\n return jnp.exp(log_temp)" }, { "identifier": "DatasetDict", "path": "serl/data/dataset.py", "snippet": "def _check_lengths(dataset_dict: DatasetDict, dataset_len: Optional[int] = None) -> int:\ndef _subselect(dataset_dict: DatasetDict, index: np.ndarray) -> DatasetDict:\ndef _sample(\n dataset_dict: Union[np.ndarray, DatasetDict], indx: np.ndarray\n) -> DatasetDict:\n def __init__(self, dataset_dict: DatasetDict, seed: Optional[int] = None):\n def np_random(self) -> np.random.RandomState:\n def seed(self, seed: Optional[int] = None) -> list:\n def __len__(self) -> int:\n def sample(\n self,\n batch_size: int,\n keys: Optional[Iterable[str]] = None,\n indx: Optional[np.ndarray] = None,\n ) -> frozen_dict.FrozenDict:\n def sample_jax(self, batch_size: int, keys: Optional[Iterable[str]] = None):\n def _sample_jax(rng, src, max_indx: int):\n def split(self, ratio: float) -> Tuple[\"Dataset\", \"Dataset\"]:\n def _trajectory_boundaries_and_returns(self) -> Tuple[list, list, list]:\n def filter(\n self, take_top: Optional[float] = None, threshold: Optional[float] = None\n ):\n def normalize_returns(self, scaling: float = 1000):\nclass Dataset(object):" }, { "identifier": "TanhNormal", "path": "serl/distributions/tanh_normal.py", "snippet": "class Normal(nn.Module):\n def __call__(self, inputs, *args, **kwargs) -> tfd.Distribution:" }, { "identifier": "Ensemble", "path": "serl/networks/ensemble.py", "snippet": "class Ensemble(nn.Module):\n net_cls: Type[nn.Module]\n num: int = 2\n\n 
@nn.compact\n def __call__(self, *args):\n ensemble = nn.vmap(\n self.net_cls,\n variable_axes={\"params\": 0},\n split_rngs={\"params\": True, \"dropout\": True},\n in_axes=None,\n out_axes=0,\n axis_size=self.num,\n )\n return ensemble()(*args)" }, { "identifier": "MLP", "path": "serl/networks/mlp.py", "snippet": "class MLP(nn.Module):\n hidden_dims: Sequence[int]\n activations: Callable[[jnp.ndarray], jnp.ndarray] = nn.relu\n activate_final: bool = False\n use_layer_norm: bool = False\n scale_final: Optional[float] = None\n dropout_rate: Optional[float] = None\n spectral_norm: bool = False\n\n @nn.compact\n def __call__(self, x: jnp.ndarray, training: bool = False) -> jnp.ndarray:\n\n for i, size in enumerate(self.hidden_dims):\n if i + 1 == len(self.hidden_dims) and self.scale_final is not None:\n x = nn.Dense(size, kernel_init=default_init(self.scale_final))(x)\n else:\n x = nn.Dense(size, kernel_init=default_init())(x)\n\n if i + 1 < len(self.hidden_dims) or self.activate_final:\n if self.dropout_rate is not None and self.dropout_rate > 0:\n x = nn.Dropout(rate=self.dropout_rate)(\n x, deterministic=not training\n )\n if self.use_layer_norm:\n x = nn.LayerNorm()(x)\n x = self.activations(x)\n return x" }, { "identifier": "PixelMultiplexer", "path": "serl/networks/pixel_multiplexer.py", "snippet": "class PixelMultiplexer(nn.Module):\n encoder_cls: Type[nn.Module]\n network_cls: Type[nn.Module]\n latent_dim: int\n stop_gradient: bool = False\n pixel_keys: Tuple[str, ...] = (\"pixels\",)\n depth_keys: Tuple[str, ...] = ()\n\n @nn.compact\n def __call__(\n self,\n observations: Union[FrozenDict, Dict],\n actions: Optional[jnp.ndarray] = None,\n training: bool = False,\n ) -> jnp.ndarray:\n observations = FrozenDict(observations)\n image_obs, state_obs = observations.pop(\"state\")\n reshape_img = lambda x: x.reshape(*x.shape[:-2], -1) / 255.0\n image_obs = jax.tree_map(reshape_img, image_obs)\n\n x = self.encoder_cls(name=f\"image_encoder\")(image_obs, training)\n if self.stop_gradient:\n # We do not update conv layers with policy gradients.\n x = jax.lax.stop_gradient(x)\n x = nn.Dense(512, kernel_init=default_init())(x)\n x = nn.LayerNorm()(x)\n x = nn.tanh(x)\n\n if \"state\" in observations:\n y = nn.Dense(self.latent_dim, kernel_init=default_init())(\n observations[\"state\"]\n )\n y = nn.LayerNorm()(y)\n y = nn.tanh(y)\n\n x = jnp.concatenate([x, y], axis=-1)\n\n if actions is None:\n return self.network_cls()(x, training)\n else:\n return self.network_cls()(x, actions, training)" }, { "identifier": "StateActionValue", "path": "serl/networks/state_action_value.py", "snippet": "class StateActionValue(nn.Module):\n base_cls: nn.Module\n\n @nn.compact\n def __call__(\n self, observations: jnp.ndarray, actions: jnp.ndarray, *args, **kwargs\n ) -> jnp.ndarray:\n inputs = jnp.concatenate([observations, actions], axis=-1)\n outputs = self.base_cls()(inputs, *args, **kwargs)\n\n value = nn.Dense(1, kernel_init=default_init())(outputs)\n\n return jnp.squeeze(value, -1)" }, { "identifier": "TwoD4PGEncoder", "path": "serl/networks/encoders/two_d4pg_encoder.py", "snippet": "class TwoD4PGEncoder(nn.Module):\n features: Sequence[int] = (32, 32, 32, 32)\n filters: Sequence[int] = (2, 1, 1, 1)\n strides: Sequence[int] = (2, 1, 1, 1)\n padding: str = \"VALID\"\n activations: Callable[[jnp.ndarray], jnp.ndarray] = nn.relu\n\n @nn.compact\n def __call__(self, x: jnp.ndarray, training=False) -> jnp.ndarray:\n assert len(self.features) == len(self.strides)\n\n processed_tensors = []\n reshape = 
False\n\n # Loop through all the tensors in the input FrozenDict\n for key, tensor in x.items():\n # Expand dimensions if they are 3\n if tensor.ndim == 3:\n tensor = tensor[None, ...]\n reshape = True\n\n # Apply Conv layers\n for features, filter_, stride in zip(self.features, self.filters, self.strides):\n tensor = nn.Conv(\n features,\n kernel_size=(filter_, filter_),\n strides=(stride, stride),\n kernel_init=default_init(),\n padding=self.padding,\n )(tensor)\n tensor = self.activations(tensor)\n\n tensor = SpatialLearnedEmbeddings(*(tensor.shape[1:]), 8)(tensor)\n processed_tensors.append(tensor)\n\n # Concatenate all processed tensors along the last axis\n concatenated_tensor = jnp.concatenate(processed_tensors, axis=-1)\n\n # Reshape if original tensors were 3D\n if reshape:\n concatenated_tensor = concatenated_tensor.reshape(-1)\n\n return concatenated_tensor" }, { "identifier": "MobileNetEncoder", "path": "serl/networks/encoders/mobilenet_encoder.py", "snippet": "class MobileNetEncoder(nn.Module):\n mobilenet: Callable[..., Callable]\n params: FrozenDict\n stop_gradient: bool = False\n\n @nn.compact\n def __call__(self, x: jnp.ndarray, training=False, divide_by=False, reshape=False) -> jnp.ndarray:\n '''\n encode an image using the mobilenet encoder\n TODO: it should work for all pretrained encoders, not just mobilenet.\n\n :param x: input image\n :param training: whether the network is in training mode\n :param divide_by: whether to divide the image by 255\n :param reshape: whether to reshape the image before passing into encoder\n :return: the encoded image\n '''\n\n mean = jnp.array((0.485, 0.456, 0.406))[None, ...]\n std = jnp.array((0.229, 0.224, 0.225))[None, ...]\n\n if reshape:\n x = jnp.reshape(x, (*x.shape[:-2], -1))\n\n if divide_by:\n x = x.astype(jnp.float32) / 255.0\n x = (x - mean) / std\n\n if x.ndim == 3:\n x = x[None, ...]\n x = self.mobilenet.apply(self.params, x, mutable=False, training=False)\n elif x.ndim == 4:\n x = self.mobilenet.apply(self.params, x, mutable=False, training=False)\n else:\n raise NotImplementedError('ndim is not 3 or 4')\n\n if self.stop_gradient:\n x = jax.lax.stop_gradient(x)\n\n return x" }, { "identifier": "TwoMobileNetEncoder", "path": "serl/networks/encoders/two_mobilenet_encoder.py", "snippet": "class TwoMobileNetEncoder(nn.Module):\n mobilenet: nn.Module\n params: FrozenDict\n dropout_rate: float = 0.1\n\n @nn.compact\n def __call__(self, x: FrozenDict[str, jnp.ndarray], training=False) -> jnp.ndarray:\n processed_tensors = []\n reshape = False\n mean = jnp.array((0.485, 0.456, 0.406))[None, ...]\n std = jnp.array((0.229, 0.224, 0.225))[None, ...]\n\n # Loop through all the tensors in the input FrozenDict\n for key, tensor in x.items():\n # Expand dimensions if they are 3\n if tensor.ndim == 3:\n tensor = tensor[None, ...]\n reshape = True\n\n # Apply mobilenet\n tensor = (tensor - mean) / std # normalize using ImageNet stats\n tensor = self.mobilenet.apply(self.params, tensor, training=False)\n # Apply SpatialLearnedEmbeddings and Dropout\n tensor = SpatialLearnedEmbeddings(*(tensor.shape[1:]), 8)(tensor)\n tensor = nn.Dropout(self.dropout_rate)(tensor, deterministic=not training)\n\n processed_tensors.append(tensor)\n\n # Concatenate all processed tensors along the last axis\n concatenated_tensor = jnp.concatenate(processed_tensors, axis=-1)\n\n # Reshape if original tensors were 3D\n if reshape:\n concatenated_tensor = concatenated_tensor.reshape(-1)\n\n return concatenated_tensor" }, { "identifier": "EncodedEncoder", "path": 
"serl/networks/encoded_encoder.py", "snippet": "class EncodedEncoder(nn.Module):\n network_cls: Type[nn.Module]\n latent_dim: int\n stop_gradient: bool = False\n pixel_key: str = \"pixels\"\n dropout_rate: float = 0.1\n\n @nn.compact\n def __call__(\n self,\n observations: Union[FrozenDict, Dict],\n training: bool = False,\n ) -> jnp.ndarray:\n observations = FrozenDict(observations)\n x = observations[self.pixel_key]\n\n if x.ndim == 3:\n x = x[None, :]\n\n x = SpatialLearnedEmbeddings(*(x.shape[1:]), 8)(x)\n x = nn.Dropout(self.dropout_rate)(x, deterministic=not training)\n\n if x.shape[0] == 1:\n x = x.reshape(-1)\n else:\n x = x.reshape((x.shape[0], -1))\n\n if self.stop_gradient:\n # We do not update conv layers with policy gradients.\n x = jax.lax.stop_gradient(x)\n\n x = nn.Dense(512, kernel_init=default_init())(x)\n x = nn.LayerNorm()(x)\n x = nn.tanh(x)\n\n return self.network_cls()(x, training)" }, { "identifier": "OneDimOutput", "path": "serl/networks/one_d_output.py", "snippet": "class OneDimOutput(nn.Module):\n base_cls: nn.Module\n\n @nn.compact\n def __call__(\n self, observations: jnp.ndarray, *args, **kwargs\n ) -> jnp.ndarray:\n if self.base_cls:\n outputs = self.base_cls()(observations, *args, **kwargs)\n else:\n outputs = observations\n\n value = nn.Dense(1, kernel_init=default_init())(outputs)\n return jnp.squeeze(value, -1)" }, { "identifier": "_unpack", "path": "serl/utils/commons.py", "snippet": "def _unpack(batch: DatasetDict):\n '''\n Helps to minimize CPU to GPU transfer.\n Assuming that if next_observation is missing, it's combined with observation:\n\n :param batch: a batch of data from the replay buffer, a dataset dict\n :return: a batch of unpacked data, a dataset dict\n '''\n\n for pixel_key in batch[\"observations\"].keys():\n if pixel_key not in batch[\"next_observations\"]:\n obs_pixels = batch[\"observations\"][pixel_key][..., :-1]\n next_obs_pixels = batch[\"observations\"][pixel_key][..., 1:]\n\n obs = batch[\"observations\"].copy(add_or_replace={pixel_key: obs_pixels})\n next_obs = batch[\"next_observations\"].copy(\n add_or_replace={pixel_key: next_obs_pixels}\n )\n batch = batch.copy(\n add_or_replace={\"observations\": obs, \"next_observations\": next_obs}\n )\n\n return batch" }, { "identifier": "_share_encoder", "path": "serl/utils/commons.py", "snippet": "def _share_encoder(source, target):\n '''\n Share encoder params between source and target:\n \n :param source: the source network, TrainState\n :param target: the target network, TrainState\n '''\n\n replacers = {}\n for k, v in source.params.items():\n if \"encoder\" in k:\n replacers[k] = v\n\n # e.g., Use critic conv layers in actor:\n new_params = target.params.copy(add_or_replace=replacers)\n return target.replace(params=new_params)" } ]
from functools import partial
from itertools import zip_longest
from typing import Callable, Dict, Optional, Sequence, Tuple, OrderedDict
from collections import OrderedDict
from jax import numpy as jnp
from flax import struct
from flax.core import FrozenDict, freeze
from flax.training.train_state import TrainState
from serl.utils.augmentations import batched_random_crop
from serl.agents.sac.sac_learner import SACLearner
from serl.agents.drq.drq_learner import DrQLearner
from serl.agents.sac.temperature import Temperature
from serl.data.dataset import DatasetDict
from serl.distributions import TanhNormal
from serl.networks import MLP, Ensemble, PixelMultiplexer, StateActionValue
from serl.networks.encoders import TwoMobileNetEncoder, MobileNetEncoder, TwoD4PGEncoder
from serl.networks.encoded_encoder import EncodedEncoder
from serl.networks.one_d_output import OneDimOutput
from serl.utils.commons import _unpack, _share_encoder
from jeffnet.linen import create_model, EfficientNet
import gym
import jax
import optax
import flax.linen as nn
8,365
"""Implementations of algorithms for continuous control.""" class VICELearner(DrQLearner): vice_classifiers: OrderedDict[str, TrainState] vice_label_smoothing: float vice_goal_pool: jnp.ndarray vice_encoder: TrainState vice_encoder_params: FrozenDict @classmethod def create( cls, seed: int, observation_space: gym.Space, action_space: gym.Space, actor_lr: float = 3e-4, critic_lr: float = 3e-4, vice_lr: float = 3e-4, temp_lr: float = 3e-4, cnn_features: Sequence[int] = (32, 32, 32, 32), cnn_filters: Sequence[int] = (3, 3, 3, 3), cnn_strides: Sequence[int] = (2, 1, 1, 1), cnn_padding: str = "VALID", latent_dim: int = 50, encoder: str = "d4pg", hidden_dims: Sequence[int] = (256, 256), discount: float = 0.99, tau: float = 0.005, num_qs: int = 2, num_min_qs: Optional[int] = None, critic_dropout_rate: Optional[float] = None, vice_dropout_rate: Optional[float] = None, vice_label_smoothing: float = 0.1, critic_layer_norm: bool = False, target_entropy: Optional[float] = None, init_temperature: float = 1.0, backup_entropy: bool = True, pixel_keys: Tuple[str, ...] = ("pixels",), depth_keys: Tuple[str, ...] = (), vice_goal_pool: jnp.ndarray = None ): """ An implementation of the version of Soft-Actor-Critic described in https://arxiv.org/abs/1812.05905 """ action_dim = action_space.shape[-1] observations = observation_space.sample() actions = action_space.sample() if target_entropy is None: target_entropy = -action_dim rng = jax.random.PRNGKey(seed) rng, actor_key, critic_key, temp_key, vice_encoder_key = jax.random.split(rng, 5) rng_vice_keys = jax.random.split(rng, 1 + len(pixel_keys)) rng, vice_keys = rng_vice_keys[0], rng_vice_keys[1:] if encoder == "d4pg": encoder_cls = partial( TwoD4PGEncoder, features=cnn_features, filters=cnn_filters, strides=cnn_strides, padding=cnn_padding, ) elif encoder == "resnet": raise NotImplementedError elif encoder == "mobilenet": MobileNet, mobilenet_variables = create_model('tf_mobilenetv3_large_100', pretrained=True) encoder_cls = partial(TwoMobileNetEncoder, mobilenet=MobileNet, params=mobilenet_variables) actor_base_cls = partial(MLP, hidden_dims=hidden_dims, activate_final=True) actor_cls = partial(TanhNormal, base_cls=actor_base_cls, action_dim=action_dim) actor_def = PixelMultiplexer( encoder_cls=encoder_cls, network_cls=actor_cls, latent_dim=latent_dim, stop_gradient=True, # do not update the encoder params pixel_keys=pixel_keys, depth_keys=depth_keys, ) actor_params = actor_def.init(actor_key, observations)["params"] actor = TrainState.create( apply_fn=actor_def.apply, params=actor_params, tx=optax.adam(learning_rate=actor_lr), ) critic_base_cls = partial( MLP, hidden_dims=hidden_dims, activate_final=True, dropout_rate=critic_dropout_rate, use_layer_norm=critic_layer_norm, )
"""Implementations of algorithms for continuous control.""" class VICELearner(DrQLearner): vice_classifiers: OrderedDict[str, TrainState] vice_label_smoothing: float vice_goal_pool: jnp.ndarray vice_encoder: TrainState vice_encoder_params: FrozenDict @classmethod def create( cls, seed: int, observation_space: gym.Space, action_space: gym.Space, actor_lr: float = 3e-4, critic_lr: float = 3e-4, vice_lr: float = 3e-4, temp_lr: float = 3e-4, cnn_features: Sequence[int] = (32, 32, 32, 32), cnn_filters: Sequence[int] = (3, 3, 3, 3), cnn_strides: Sequence[int] = (2, 1, 1, 1), cnn_padding: str = "VALID", latent_dim: int = 50, encoder: str = "d4pg", hidden_dims: Sequence[int] = (256, 256), discount: float = 0.99, tau: float = 0.005, num_qs: int = 2, num_min_qs: Optional[int] = None, critic_dropout_rate: Optional[float] = None, vice_dropout_rate: Optional[float] = None, vice_label_smoothing: float = 0.1, critic_layer_norm: bool = False, target_entropy: Optional[float] = None, init_temperature: float = 1.0, backup_entropy: bool = True, pixel_keys: Tuple[str, ...] = ("pixels",), depth_keys: Tuple[str, ...] = (), vice_goal_pool: jnp.ndarray = None ): """ An implementation of the version of Soft-Actor-Critic described in https://arxiv.org/abs/1812.05905 """ action_dim = action_space.shape[-1] observations = observation_space.sample() actions = action_space.sample() if target_entropy is None: target_entropy = -action_dim rng = jax.random.PRNGKey(seed) rng, actor_key, critic_key, temp_key, vice_encoder_key = jax.random.split(rng, 5) rng_vice_keys = jax.random.split(rng, 1 + len(pixel_keys)) rng, vice_keys = rng_vice_keys[0], rng_vice_keys[1:] if encoder == "d4pg": encoder_cls = partial( TwoD4PGEncoder, features=cnn_features, filters=cnn_filters, strides=cnn_strides, padding=cnn_padding, ) elif encoder == "resnet": raise NotImplementedError elif encoder == "mobilenet": MobileNet, mobilenet_variables = create_model('tf_mobilenetv3_large_100', pretrained=True) encoder_cls = partial(TwoMobileNetEncoder, mobilenet=MobileNet, params=mobilenet_variables) actor_base_cls = partial(MLP, hidden_dims=hidden_dims, activate_final=True) actor_cls = partial(TanhNormal, base_cls=actor_base_cls, action_dim=action_dim) actor_def = PixelMultiplexer( encoder_cls=encoder_cls, network_cls=actor_cls, latent_dim=latent_dim, stop_gradient=True, # do not update the encoder params pixel_keys=pixel_keys, depth_keys=depth_keys, ) actor_params = actor_def.init(actor_key, observations)["params"] actor = TrainState.create( apply_fn=actor_def.apply, params=actor_params, tx=optax.adam(learning_rate=actor_lr), ) critic_base_cls = partial( MLP, hidden_dims=hidden_dims, activate_final=True, dropout_rate=critic_dropout_rate, use_layer_norm=critic_layer_norm, )
critic_cls = partial(StateActionValue, base_cls=critic_base_cls)
9
2023-11-02 23:32:24+00:00
12k
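The _unpack helper listed in the serl context above recovers observation and next_observation pixels from a single frame-stacked array by slicing its last axis. A minimal numpy sketch of that slicing; the array shape, dtype, and stack depth here are invented for illustration and are not taken from the repo:

import numpy as np

# Hypothetical frame-stacked pixels: H x W x C x (num_stack + 1). Packing the
# next frame into the same array means only one tensor per step has to be
# stored and moved from CPU to GPU.
stacked = np.zeros((64, 64, 3, 4), dtype=np.uint8)

obs_pixels = stacked[..., :-1]      # frames t-2, t-1, t
next_obs_pixels = stacked[..., 1:]  # frames t-1, t, t+1

assert obs_pixels.shape == next_obs_pixels.shape == (64, 64, 3, 3)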
tiendatnguyen-vision/Orbit-symmetrize
RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/representation.py
[ { "identifier": "Group", "path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/groups.py", "snippet": "class Group(nn.Module):\n \"\"\" Abstract Group Object which new groups should inherit from. \"\"\"\n\n def __init__(self):\n super().__init__()\n self.lie_algebra = NotImplemented # The continuous generators\n self.discrete_generators = NotImplemented # The discrete generators\n self.z_scale = None # For scale noise for sampling elements\n self.is_orthogonal = None\n self.is_permutation = None\n self.d = NotImplemented # The dimension of the base representation\n self.device = torch.device('cpu')\n self.args = None\n\n def init(self, *args):\n \"\"\" Initialize the group object. \"\"\"\n # get the dimension of the base group representation\n if self.d is NotImplemented:\n if (self.lie_algebra is not NotImplemented) and \\\n len(self.lie_algebra) > 0:\n self.d = self.lie_algebra[0].size(-1)\n if (self.discrete_generators is not NotImplemented) and \\\n len(self.discrete_generators) > 0:\n self.d = self.discrete_generators[0].size(-1)\n\n self.args = args\n\n if self.lie_algebra is NotImplemented:\n self.lie_algebra = torch.zeros((0, self.d, self.d), device=self.device)\n if self.discrete_generators is NotImplemented:\n self.discrete_generators = torch.zeros((0, self.d, self.d), device=self.device)\n\n self.to(self.device)\n\n # set orthogonal flag automatically if not specified\n if self.is_permutation:\n self.is_orthogonal = True\n if self.is_orthogonal is None:\n self.is_orthogonal = True\n if len(self.lie_algebra) != 0:\n Id = torch.eye(self.d, device=self.device)\n A_dense = torch.stack([[email protected](Ai.dtype) for Ai in self.lie_algebra])\n self.is_orthogonal &= rel_err(-A_dense.transpose(2, 1), A_dense) < 1e-6\n if len(self.discrete_generators) != 0:\n Id = torch.eye(self.d, device=self.device)\n h_dense = torch.stack([[email protected](hi.dtype) for hi in self.discrete_generators])\n self.is_orthogonal &= rel_err(h_dense.transpose(2, 1)@h_dense, Id[None]) < 1e-6\n\n # set regular flag automatically if not specified\n if self.is_orthogonal and (self.is_permutation is None):\n self.is_permutation = True\n # no infinitesmal generators and all rows have one 1\n self.is_permutation &= (len(self.lie_algebra) == 0)\n if len(self.discrete_generators) != 0:\n Id = torch.eye(self.d, device=self.device)\n h_dense = torch.stack([[email protected](hi.dtype) for hi in self.discrete_generators])\n self.is_permutation &= (((h_dense-1).abs()<1e-6).long().sum(-1) == 1).all()\n\n def exp(self, A):\n \"\"\" Matrix exponential \"\"\"\n return torch.linalg.matrix_exp(A)\n\n def num_constraints(self):\n \"\"\" Number of constraints to solve for the group \"\"\"\n return len(self.lie_algebra)+len(self.discrete_generators)\n\n def sample(self):\n \"\"\"Draw a sample from the group (not necessarily Haar measure)\"\"\"\n return self.samples(1)[0]\n\n def samples(self, N):\n \"\"\" Draw N samples from the group (not necessarily Haar measure)\"\"\"\n Id = torch.eye(self.d, device=self.device)\n A_dense = torch.stack([[email protected](Ai.dtype) for Ai in self.lie_algebra]) \\\n if len(self.lie_algebra) \\\n else torch.zeros((0, self.d, self.d), device=self.device)\n h_dense = torch.stack([[email protected](hi.dtype) for hi in self.discrete_generators]) \\\n if len(self.discrete_generators) \\\n else torch.zeros((0, self.d, self.d), device=self.device)\n z = torch.randn(N, A_dense.size(0), device=self.device)\n if self.z_scale is not None:\n z *= self.z_scale\n k = torch.randint(-MAX_POWER, MAX_POWER+1, (N, 
h_dense.size(0), 3), device=self.device)\n return noise2samples(z, k, A_dense, h_dense)\n\n def check_valid_group_elems(self, g):\n \"\"\" Check that the group elements are valid \"\"\"\n return True\n\n def __str__(self):\n return repr(self)\n\n def __repr__(self):\n outstr = f\"{self.__class__}\"\n if self.args:\n outstr += '('+''.join(repr(arg) for arg in self.args)+')'\n return outstr\n\n def __eq__(self, G2): # TODO: more permissive by checking that spans are equal?\n return repr(self) == repr(G2)\n\n def __hash__(self):\n return hash(repr(self))\n\n def __lt__(self, other):\n \"\"\" For sorting purposes only \"\"\"\n return hash(self) < hash(other)\n\n def __mul__(self, other):\n return DirectProduct(self, other)\n\n def forward(self):\n \"\"\" Forward method, unused. \"\"\"\n return None\n\n def to(self, *args, **kwargs):\n \"\"\" Move the group to the specified device \"\"\"\n if isinstance(self.lie_algebra, torch.Tensor):\n self.lie_algebra = self.lie_algebra.to(*args, **kwargs)\n elif isinstance(self.lie_algebra, list):\n self.lie_algebra = [Ai.to(*args, **kwargs) for Ai in self.lie_algebra]\n if isinstance(self.discrete_generators, torch.Tensor):\n self.discrete_generators = self.discrete_generators.to(*args, **kwargs)\n elif isinstance(self.discrete_generators, list):\n self.discrete_generators = [hi.to(*args, **kwargs) for hi in self.discrete_generators]\n if self.z_scale is not None:\n self.z_scale = self.z_scale.to(*args, **kwargs)\n self.device = torch.empty(0).to(*args, **kwargs).device\n return self" }, { "identifier": "LinearOperator", "path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operator_base.py", "snippet": "class LinearOperator(nn.Module):\n \"\"\" Common interface for performing matrix vector products\n Many iterative methods (e.g. cg, gmres) do not need to know the\n individual entries of a matrix to solve a linear system A*x=b.\n Such solvers only require the computation of matrix vector\n products, A*v where v is a dense vector. This class serves as\n an abstract interface between iterative solvers and matrix-like\n objects.\n To construct a concrete LinearOperator, either pass appropriate\n callables to the constructor of this class, or subclass it.\n A subclass must implement either one of the methods ``_matvec``\n and ``_matmat``, and the attributes/properties ``shape`` (pair of\n integers) and ``dtype`` (may be None). It may call the ``__init__``\n on this class to have these attributes validated. Implementing\n ``_matvec`` automatically implements ``_matmat`` (using a naive\n algorithm) and vice-versa.\n Optionally, a subclass may implement ``_rmatvec`` or ``_adjoint``\n to implement the Hermitian adjoint (conjugate transpose). As with\n ``_matvec`` and ``_matmat``, implementing either ``_rmatvec`` or\n ``_adjoint`` implements the other automatically. Implementing\n ``_adjoint`` is preferable; ``_rmatvec`` is mostly there for\n backwards compatibility.\n Parameters\n ----------\n shape : tuple\n Matrix dimensions (M, N).\n matvec : callable f(v)\n Returns returns A * v.\n rmatvec : callable f(v)\n Returns A^H * v, where A^H is the conjugate transpose of A.\n matmat : callable f(V)\n Returns A * V, where V is a dense matrix with dimensions (N, K).\n dtype : dtype\n Data type of the matrix.\n rmatmat : callable f(V)\n Returns A^H * V, where V is a dense matrix with dimensions (M, K).\n Attributes\n ----------\n args : tuple\n For linear operators describing products etc. 
of other linear\n operators, the operands of the binary operation.\n ndim : int\n Number of dimensions (this is always 2)\n See Also\n --------\n aslinearoperator : Construct LinearOperators\n Notes\n -----\n The user-defined matvec() function must properly handle the case\n where v has shape (N,) as well as the (N,1) case. The shape of\n the return type is handled internally by LinearOperator.\n LinearOperator instances can also be multiplied, added with each\n other and exponentiated, all lazily: the result of these operations\n is always a new, composite LinearOperator, that defers linear\n operations to the original operators and combines the results.\n More details regarding how to subclass a LinearOperator and several\n examples of concrete LinearOperator instances can be found in the\n external project `PyLops <https://pylops.readthedocs.io>`_.\n Examples\n --------\n >>> def mv(v):\n ... return torch.tensor([2*v[0], 3*v[1]])\n ...\n >>> A = LinearOperator((2,2), matvec=mv)\n >>> A\n <2x2 _CustomLinearOperator with dtype=float64>\n >>> A.matvec(torch.ones(2))\n tensor([ 2., 3.])\n >>> A * torch.ones(2)\n tensor([ 2., 3.])\n \"\"\"\n\n def __new__(cls, *args, **kwargs):\n if cls is LinearOperator:\n # Operate as _CustomLinearOperator factory.\n return super(LinearOperator, cls).__new__(_CustomLinearOperator)\n\n obj = super(LinearOperator, cls).__new__(cls)\n if (type(obj)._matvec == LinearOperator._matvec\n and type(obj)._matmat == LinearOperator._matmat):\n warnings.warn(\"LinearOperator subclass should implement\"\n \" at least one of _matvec and _matmat.\",\n category=RuntimeWarning, stacklevel=2)\n return obj\n\n def __init__(self):\n super().__init__()\n self.ndim = 2\n self.dtype = None\n self.shape = None\n self.device = None\n\n def init(self, dtype, shape, device):\n \"\"\" Initialize this LinearOperator.\n To be called by subclasses. 
``dtype`` may be None; ``shape`` should\n be convertible to a length-2 tuple.\n Called from subclasses at the end of the __init__ routine.\n \"\"\"\n if dtype is None:\n dtype = torch.float # force float 32\n else:\n if not isinstance(dtype, torch.dtype):\n dtype = torch_dtype(dtype)\n\n shape = tuple(shape)\n if not isshape(shape):\n raise ValueError(f\"invalid shape {(shape,)} (must be 2-d)\")\n\n self.dtype = dtype\n self.shape = torch.Size(shape)\n self.device = torch_device(device)\n\n def size(self, dim=None):\n \"\"\" Return the size of this LinearOperator.\n This is a synonym for ``shape``.\n \"\"\"\n return self.shape if dim is None else self.shape[dim]\n\n def _matmat(self, V):\n \"\"\" Default matrix-matrix multiplication handler.\n Falls back on the user-defined _matvec method, so defining that will\n define matrix multiplication (though in a very suboptimal way).\n \"\"\"\n return torch.hstack([self.matvec(col.reshape(-1, 1)) for col in V.T])\n\n def _matvec(self, v):\n \"\"\" Default matrix-vector multiplication handler.\n If self is a linear operator of shape (M, N), then this method will\n be called on a shape (N,) or (N, 1) ndarray, and should return a\n shape (M,) or (M, 1) ndarray.\n This default implementation falls back on _matmat, so defining that\n will define matrix-vector multiplication as well.\n \"\"\"\n return self.matmat(v.reshape(-1, 1))\n\n def matvec(self, v):\n \"\"\" Matrix-vector multiplication.\n Performs the operation y=A*v where A is an MxN linear\n operator and v is a column vector or 1-d array.\n Parameters\n ----------\n v : {matrix, ndarray}\n An array with shape (N,) or (N,1).\n Returns\n -------\n y : {matrix, ndarray}\n A matrix or ndarray with shape (M,) or (M,1) depending\n on the type and shape of the x argument.\n Notes\n -----\n This matvec wraps the user-specified matvec routine or overridden\n _matvec method to ensure that y has the correct shape and type.\n \"\"\"\n M, N = self.shape\n if v.shape != (N,) and v.shape != (N, 1):\n raise ValueError('dimension mismatch')\n\n y = self._matvec(v)\n\n if v.ndim == 1:\n y = y.reshape(M)\n elif v.ndim == 2:\n y = y.reshape(M, 1)\n else:\n raise ValueError('invalid shape returned by user-defined matvec()')\n\n return y\n\n def rmatvec(self, v):\n \"\"\" Adjoint matrix-vector multiplication.\n Performs the operation y = A^H * v where A is an MxN linear\n operator and v is a column vector or 1-d array.\n Parameters\n ----------\n v : {matrix, ndarray}\n An array with shape (M,) or (M,1).\n Returns\n -------\n y : {matrix, ndarray}\n A matrix or ndarray with shape (N,) or (N,1) depending\n on the type and shape of the v argument.\n Notes\n -----\n This rmatvec wraps the user-specified rmatvec routine or overridden\n _rmatvec method to ensure that y has the correct shape and type.\n \"\"\"\n M, N = self.shape\n\n if v.shape != (M,) and v.shape != (M, 1):\n raise ValueError('dimension mismatch')\n\n y = self._rmatvec(v)\n\n if v.ndim == 1:\n y = y.reshape(N)\n elif v.ndim == 2:\n y = y.reshape(N, 1)\n else:\n raise ValueError('invalid shape returned by user-defined rmatvec()')\n\n return y\n\n def _rmatvec(self, v):\n \"\"\" Default implementation of _rmatvec; defers to adjoint. 
\"\"\"\n if type(self)._adjoint == LinearOperator._adjoint:\n # _adjoint not overridden, prevent infinite recursion\n raise NotImplementedError\n return self.H().matvec(v)\n\n def matmat(self, V):\n \"\"\" Matrix-matrix multiplication.\n Performs the operation y=A*V where A is an MxN linear\n operator and V dense N*K matrix or ndarray.\n Parameters\n ----------\n V : {matrix, ndarray}\n An array with shape (N,K).\n Returns\n -------\n Y : {matrix, ndarray}\n A matrix or ndarray with shape (M,K) depending on\n the type of the V argument.\n Notes\n -----\n This matmat wraps any user-specified matmat routine or overridden\n _matmat method to ensure that y has the correct type.\n \"\"\"\n if V.ndim != 2:\n raise ValueError(f'expected 2-d ndarray or matrix, not {V.ndim}-d')\n\n if V.size(0) != self.size(1):\n raise ValueError(f'dimension mismatch: {self.shape}, {V.shape}')\n\n Y = self._matmat(V)\n return Y\n\n def rmatmat(self, V):\n \"\"\" Adjoint matrix-matrix multiplication.\n Performs the operation y = A^H * V where A is an MxN linear\n operator and V is a column vector or 1-d array, or 2-d array.\n The default implementation defers to the adjoint.\n Parameters\n ----------\n V : {matrix, ndarray}\n A matrix or 2D array.\n Returns\n -------\n Y : {matrix, ndarray}\n A matrix or 2D array depending on the type of the input.\n Notes\n -----\n This rmatmat wraps the user-specified rmatmat routine.\n \"\"\"\n if V.ndim != 2:\n raise ValueError(f'expected 2-d matrix, not {V.ndim}-d')\n\n if V.size(0) != self.size(0):\n raise ValueError(f'dimension mismatch: {self.shape}, {V.shape}')\n\n Y = self._rmatmat(V)\n return Y\n\n def _rmatmat(self, V):\n \"\"\" Default implementation of _rmatmat defers to rmatvec or adjoint. \"\"\"\n if type(self)._adjoint == LinearOperator._adjoint:\n return torch.hstack([self.rmatvec(col.reshape(-1, 1)) for col in V.T])\n return self.H().matmat(V)\n\n def forward(self, v):\n \"\"\" Matrix-vector or matrix-matrix multiplication. 
\"\"\"\n return self*v\n\n def __mul__(self, v):\n return self.dot(v)\n\n def dot(self, v):\n \"\"\" Matrix-matrix or matrix-vector multiplication.\n Parameters\n ----------\n v : array_like\n 1-d or 2-d array, representing a vector or matrix.\n Returns\n -------\n Av : array\n 1-d or 2-d array (depending on the shape of x) that represents\n the result of applying this linear operator on x.\n \"\"\"\n if isinstance(v, LinearOperator):\n return _ProductLinearOperator(self, v)\n if torch.is_tensor(v):\n if v.ndim == 0:\n return _ScaledLinearOperator(self, v)\n if v.ndim == 1 or v.ndim == 2 and v.size(1) == 1:\n return self.matvec(v)\n if v.ndim == 2:\n return self.matmat(v)\n raise ValueError(f'expected 1-d or 2-d array or matrix, got {v}')\n\n def __matmul__(self, other):\n if isscalar(other):\n raise ValueError(\"Scalar operands are not allowed, use '*' instead\")\n return self.__mul__(other)\n\n def __rmatmul__(self, other):\n if isscalar(other):\n raise ValueError(\"Scalar operands are not allowed, use '*' instead\")\n return self.__rmul__(other)\n\n def __rmul__(self, x):\n if isscalar(x):\n return _ScaledLinearOperator(self, x)\n return NotImplemented\n\n def __pow__(self, p):\n if isscalar(p):\n return _PowerLinearOperator(self, p)\n return NotImplemented\n\n def __add__(self, x):\n if isinstance(x, LinearOperator):\n return _SumLinearOperator(self, x)\n if torch.is_tensor(x) and x.ndim == 2:\n return _SumLinearOperator(self, Lazy(x))\n return NotImplemented\n\n def __radd__(self, x):\n return self.__add__(x)\n\n def __neg__(self):\n return _ScaledLinearOperator(self, -1)\n\n def __sub__(self, x):\n return self.__add__(-x)\n\n def __repr__(self):\n M, N = self.shape\n if self.dtype is None:\n dtype = 'unspecified dtype'\n else:\n dtype = 'dtype=' + str(self.dtype)\n\n return f'<{M}x{N} {self.__class__.__name__} with {dtype}>'\n\n def adjoint(self):\n \"\"\" Hermitian adjoint.\n Returns the Hermitian adjoint of self, aka the Hermitian\n conjugate or Hermitian transpose. For a complex matrix, the\n Hermitian adjoint is equal to the conjugate transpose.\n Can be abbreviated self.H instead of self.adjoint().\n Returns\n -------\n A_H : LinearOperator\n Hermitian adjoint of self.\n \"\"\"\n return self._adjoint()\n\n def H(self):\n \"\"\" Hermitian adjoint. \"\"\"\n return self.adjoint()\n\n def transpose(self):\n \"\"\" Transpose this linear operator.\n Returns a LinearOperator that represents the transpose of this one.\n Can be abbreviated self.T instead of self.transpose().\n \"\"\"\n return self._transpose()\n\n def t(self):\n \"\"\" Transpose this linear operator. \"\"\"\n return self.transpose()\n\n def _adjoint(self):\n \"\"\" Default implementation of _adjoint; defers to rmatvec. \"\"\"\n return _AdjointLinearOperator(self)\n\n def _transpose(self):\n \"\"\" Default implementation of _transpose; defers to rmatvec + conj\"\"\"\n return _TransposedLinearOperator(self)\n\n def invt(self):\n \"\"\" Default implementation of inverse transpose; defers to inv + T \"\"\"\n return (self ** -1).transpose()\n\n def to_dense(self):\n \"\"\" Default implementation of to_dense which produces the dense\n matrix corresponding to the given lazy matrix. Defaults to\n multiplying by the identity \"\"\"\n return [email protected](self.size(-1), device=self.device)\n\n def to(self, device):\n \"\"\" Move this linear operator to a new device. 
\"\"\"\n self.device = torch.empty(0).to(device).device\n return self" }, { "identifier": "ConcatLazy", "path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class ConcatLazy(LinearOperator):\n \"\"\" Produces a linear operator equivalent to concatenating\n a collection of matrices Ms along axis=0 \"\"\"\n\n def __init__(self, Ms):\n super().__init__()\n self.Ms = Ms\n assert all(M.size(0) == Ms[0].size(0) for M in Ms),\\\n f\"Trying to concatenate matrices of different sizes {[M.shape for M in Ms]}\"\n shape = (sum(M.size(0) for M in Ms), Ms[0].size(1))\n device = get_device(Ms)\n self.init(None, shape, device)\n self.to(self.device)\n\n def _matmat(self, V):\n return torch.cat([M@V for M in self.Ms])\n\n def _rmatmat(self, V):\n Vs = torch.chunk(V, len(self.Ms))\n return sum(Mi.t()@Vi for Mi, Vi in zip(self.Ms, Vs))\n\n def to_dense(self):\n dense_Ms = [M.to_dense() if isinstance(M, LinearOperator) else M for M in self.Ms]\n return torch.cat(dense_Ms)\n\n def to(self, device):\n self.Ms = [M.to(device) for M in self.Ms]\n self.device = torch.empty(0).to(device).device\n return self" }, { "identifier": "I", "path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class I(LinearOperator):\n \"\"\" Identity operator. \"\"\"\n\n def __init__(self, d, device=None):\n super().__init__()\n shape = (d, d)\n self.init(None, shape, device)\n\n def _matmat(self, V): # (c,k)\n return V\n\n def _matvec(self, v):\n return v\n\n def _adjoint(self):\n return self\n\n def invt(self):\n return self" }, { "identifier": "lazify", "path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "def lazify(x):\n \"\"\" Convert a tensor LinearOperator. \"\"\"\n if isinstance(x, LinearOperator):\n return x\n if torch.is_tensor(x):\n return Lazy(x)\n raise NotImplementedError" }, { "identifier": "densify", "path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "def densify(x):\n \"\"\" Convert a LinearOperator to a dense tensor. \"\"\"\n if isinstance(x, LinearOperator):\n return x.to_dense()\n if torch.is_tensor(x):\n return x\n raise NotImplementedError" }, { "identifier": "LazyJVP", "path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class LazyJVP(LinearOperator):\n \"\"\" Lazy Jacobian-vector product. \"\"\"\n\n def __init__(self, operator_fn, X, TX):\n super().__init__()\n self.operator_fn = operator_fn\n self.X = X\n self.TX = TX\n self.init(torch.float, operator_fn(X).shape, X.device)\n self.to(self.device)\n\n def vjp(self, v):\n \"\"\" Computes the vector-Jacobian product \"\"\"\n return torch.autograd.functional.jvp(\n lambda x: self.operator_fn(x)@v, [self.X], [self.TX])[1]\n\n def vjp_T(self, v):\n \"\"\" Computes the vector-Jacobian product \"\"\"\n return torch.autograd.functional.jvp(\n lambda x: self.operator_fn(x).t()@v, [self.X], [self.TX])[1]\n\n def _matmat(self, V):\n return self.vjp(V)\n\n def _matvec(self, v):\n return self.vjp(v)\n\n def _rmatmat(self, V):\n return self.vjp_T(V)\n\n def to(self, device):\n self.X = self.X.to(device)\n self.TX = self.TX.to(device)\n self.device = self.X.device\n return self" }, { "identifier": "LazyPerm", "path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class LazyPerm(LinearOperator):\n \"\"\" Lazy permutation. 
\"\"\"\n\n def __init__(self, perm):\n super().__init__()\n self.perm = perm\n shape = (len(perm), len(perm))\n self.init(None, shape, perm.device)\n\n def _matmat(self, V):\n return V[self.perm]\n\n def _matvec(self, v):\n return v[self.perm]\n\n def _adjoint(self):\n return LazyPerm(torch.argsort(self.perm))\n\n def invt(self):\n return self\n\n def to(self, device):\n self.perm = self.perm.to(device)\n self.device = self.perm.device\n return self" }, { "identifier": "LazyDirectSum", "path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class LazyDirectSum(LinearOperator):\n \"\"\" Lazy direct sum. \"\"\"\n\n def __init__(self, Ms, multiplicities=None):\n super().__init__()\n self.Ms = Ms\n self.multiplicities = [1 for _ in Ms] if multiplicities is None else multiplicities\n shape = (sum(Mi.size(0)*c for Mi, c in zip(Ms, multiplicities)),\n sum(Mi.size(0)*c for Mi, c in zip(Ms, multiplicities)))\n device = get_device(Ms)\n self.init(None, shape, device)\n self.to(self.device)\n\n def _matvec(self, v):\n return lazy_direct_matmat(v, self.Ms, self.multiplicities)\n\n def _matmat(self, V): # (n,k)\n return lazy_direct_matmat(V, self.Ms, self.multiplicities)\n\n def _adjoint(self):\n return LazyDirectSum([Mi.t() for Mi in self.Ms])\n\n def invt(self):\n return LazyDirectSum([M.invt() for M in self.Ms])\n\n def to_dense(self):\n Ms_all = [M for M, c in zip(self.Ms, self.multiplicities)\n for _ in range(c)]\n Ms_all = [Mi.to_dense() if isinstance(Mi, LinearOperator)\n else Mi for Mi in Ms_all]\n return torch.block_diag(*Ms_all)\n\n def to(self, device):\n self.Ms = [M.to(device) for M in self.Ms]\n self.device = torch.empty(0).to(device).device\n return self" }, { "identifier": "LazyKron", "path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class LazyKron(LinearOperator):\n \"\"\" Lazy tensor product. \"\"\"\n\n def __init__(self, Ms):\n super().__init__()\n self.Ms = Ms\n shape = product([Mi.size(0) for Mi in Ms]), product([Mi.size(1) for Mi in Ms])\n device = get_device(Ms)\n self.init(None, shape, device)\n self.to(self.device)\n\n def _matvec(self, v):\n return self._matmat(v).reshape(-1)\n\n def _matmat(self, V):\n eV = V.reshape(*[Mi.size(-1) for Mi in self.Ms], -1)\n for i, M in enumerate(self.Ms):\n eV_front = torch.movedim(eV, i, 0)\n MeV_front = (M@eV_front.reshape(M.size(-1), -1)).reshape(M.size(0), *eV_front.shape[1:])\n eV = torch.movedim(MeV_front, 0, i)\n return eV.reshape(self.size(0), eV.size(-1))\n\n def _adjoint(self):\n return LazyKron([Mi.t() for Mi in self.Ms])\n\n def invt(self):\n return LazyKron([M.invt() for M in self.Ms])\n\n def to_dense(self):\n self.to(self.device)\n Ms = [M.to_dense() if isinstance(M, LinearOperator) else M for M in self.Ms]\n return reduce(torch.kron, Ms)\n\n def __new__(cls, Ms):\n if len(Ms) == 1:\n return Ms[0]\n return super().__new__(cls)\n\n def to(self, device):\n self.Ms = [M.to(device) for M in self.Ms]\n self.device = torch.empty(0).to(device).device\n return self" }, { "identifier": "LazyKronsum", "path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class LazyKronsum(LinearOperator):\n \"\"\" Lazy tensor sum. 
\"\"\"\n\n def __init__(self, Ms):\n super().__init__()\n self.Ms = Ms\n shape = product([Mi.size(0) for Mi in Ms]), product([Mi.size(1) for Mi in Ms])\n dtype = torch.float\n device = get_device(Ms)\n self.init(dtype, shape, device)\n self.to(self.device)\n\n def _matvec(self, v):\n return self._matmat(v).reshape(-1)\n\n def _matmat(self, V):\n eV = V.reshape(*[Mi.size(-1) for Mi in self.Ms], -1)\n out = 0*eV\n for i, M in enumerate(self.Ms):\n eV_front = torch.movedim(eV, i, 0)\n M, eV_front = dtype_cast(M, eV_front)\n MeV_front = (M@eV_front.reshape(M.size(-1), -1)).reshape(M.size(0), *eV_front.shape[1:])\n out, MeV_front = dtype_cast(out, MeV_front)\n out += torch.movedim(MeV_front, 0, i)\n return out.reshape(self.size(0), eV.size(-1))\n\n def _adjoint(self):\n return LazyKronsum([Mi.t() for Mi in self.Ms])\n\n def to_dense(self):\n Ms = [M.to_dense() if isinstance(M, LinearOperator) else M for M in self.Ms]\n return reduce(kronsum, Ms)\n\n def __new__(cls, Ms):\n if len(Ms) == 1:\n return Ms[0]\n return super().__new__(cls)\n\n # could also be implemented as follows,\n # but fusing the sum into a single linearOperator is faster\n # def lazy_kronsum(Ms):\n # n = len(Ms)\n # lprod = np.cumprod([1]+[mi.size(-1) for mi in Ms])\n # rprod = np.cumprod([1]+[mi.size(-1) for mi in reversed(Ms)])[::-1]\n # return reduce(lambda a,b: a+b,[lazy_kron([I(lprod[i]),Mi,I(rprod[i+1])])\n # for i,Mi in enumerate(Ms)])\n\n def to(self, device):\n self.Ms = [M.to(device) for M in self.Ms]\n self.device = torch.empty(0).to(device).device\n return self" }, { "identifier": "lazy_direct_matmat", "path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "def lazy_direct_matmat(v, Ms, mults):\n \"\"\" Computes the matrix-vector product of a direct sum of matrices\n with a vector. \"\"\"\n k = v.size(1) if len(v.shape) > 1 else 1\n i = 0\n y = []\n for M, multiplicity in zip(Ms, mults):\n i_end = i+multiplicity*M.size(-1)\n elems = M@v[i:i_end][None].reshape(k*multiplicity, M.size(-1)).t()\n y.append(elems.t().reshape(k, multiplicity*M.size(0)).t())\n i = i_end\n y = torch.cat(y) # concatenate over rep axis\n return y" }, { "identifier": "product", "path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "def product(c):\n \"\"\" Product of a list of numbers. \"\"\"\n return reduce(lambda a, b: a*b, c)" }, { "identifier": "orthogonal_complement", "path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/utils.py", "snippet": "def orthogonal_complement(proj):\n \"\"\" Computes the orthogonal complement to a given matrix proj\"\"\"\n _, S, Vh = torch.linalg.svd(proj, full_matrices=True)\n rank = (S > 1e-5).sum()\n return Vh[rank:].conj().t()" }, { "identifier": "krylov_constraint_solve", "path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/utils.py", "snippet": "def krylov_constraint_solve(C, tol=1e-5):\n \"\"\" Computes the solution basis Q for the linear constraint CQ=0 and QᵀQ=I\n up to specified tolerance with C expressed as a LinearOperator. 
\"\"\"\n r = 5\n if C.size(0)*r*2 > 2e9:\n raise RuntimeError(f\"Solns for contraints {C.shape} too large to fit in memory\")\n found_rank = 5\n while found_rank == r:\n r *= 2 # Iterative doubling of rank until large enough to include the full solution space\n if C.size(0)*r > 2e9:\n logging.error(\"Hit memory limits, switching to \"\n \"sample equivariant subspace of size %r\", found_rank)\n break\n Q = krylov_constraint_solve_upto_r(C, r, tol)\n found_rank = Q.size(-1)\n return Q" }, { "identifier": "get_device", "path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/utils.py", "snippet": "def get_device(operators, devices=None):\n \"\"\" Returns the device of the first operator that has a device attribute. \"\"\"\n if devices is None:\n devices = []\n for obj in operators:\n if obj is not None and hasattr(obj, 'device') and obj.device.type != 'cpu':\n return obj.device\n return torch.device('cpu')" } ]
import math
import logging
import itertools
import torch
from functools import lru_cache as cache, reduce
from collections import defaultdict
from plum import dispatch
from torch import nn
from ..groups import Group
from .linear_operator_base import LinearOperator
from .linear_operators import ConcatLazy, I, lazify, densify, LazyJVP, LazyPerm, \
    LazyDirectSum, LazyKron, LazyKronsum, lazy_direct_matmat, product
from .utils import orthogonal_complement, krylov_constraint_solve, get_device
9,361
""" The base Representation class. """ class Rep(nn.Module): """ The base Representation class. Representation objects formalize the vector space V on which the group acts, the group representation matrix ρ(g), and the Lie Algebra representation dρ(A) in a single object. Representations act as types for vectors coming from V. These types can be manipulated and transformed with the built in operators ⊕,⊗,dual, as well as incorporating custom representations. Representation objects should be immutable. At minimum, new representations need to implement ``rho``, ``__str__``.""" def __init__(self): super().__init__() self.is_permutation = False self._size = None self.G = None def rho(self, M): """ Group representation of the matrix M of shape (d,d)""" raise NotImplementedError def drho(self, A): """ Lie Algebra representation of the matrix A of shape (d,d)""" In = torch.eye(A.size(0), dtype=A.dtype, device=A.device) return LazyJVP(self.rho, In, A) def forward(self, G): """ Instantiate (nonconcrete) representation with a symmetry group (forward) """ raise NotImplementedError def __str__(self): return repr(self) def __repr__(self): raise NotImplementedError def __eq__(self, other): if type(self) is not type(other): # pylint: disable=unidiomatic-typecheck return False return self.__hash__() == other.__hash__() def __hash__(self): raise NotImplementedError def size(self): """ Dimension dim(V) of the representation """ if self._size is not None: return self._size if self.concrete() and isinstance(self.G, Group): self._size = self.rho(self.G.sample()).size(-1) return self._size raise NotImplementedError def canonicalize(self): """ An optional method to convert the representation into a canonical form in order to reuse equivalent solutions in the solver. Should return both the canonically ordered representation, along with a permutation which can be applied to vectors of the current representation to achieve that ordering. """ # return canonicalized rep return self, torch.arange(self.size()) def rho_dense(self, M): """ A convenience function which returns rho(M) as a dense matrix."""
""" The base Representation class. """ class Rep(nn.Module): """ The base Representation class. Representation objects formalize the vector space V on which the group acts, the group representation matrix ρ(g), and the Lie Algebra representation dρ(A) in a single object. Representations act as types for vectors coming from V. These types can be manipulated and transformed with the built in operators ⊕,⊗,dual, as well as incorporating custom representations. Representation objects should be immutable. At minimum, new representations need to implement ``rho``, ``__str__``.""" def __init__(self): super().__init__() self.is_permutation = False self._size = None self.G = None def rho(self, M): """ Group representation of the matrix M of shape (d,d)""" raise NotImplementedError def drho(self, A): """ Lie Algebra representation of the matrix A of shape (d,d)""" In = torch.eye(A.size(0), dtype=A.dtype, device=A.device) return LazyJVP(self.rho, In, A) def forward(self, G): """ Instantiate (nonconcrete) representation with a symmetry group (forward) """ raise NotImplementedError def __str__(self): return repr(self) def __repr__(self): raise NotImplementedError def __eq__(self, other): if type(self) is not type(other): # pylint: disable=unidiomatic-typecheck return False return self.__hash__() == other.__hash__() def __hash__(self): raise NotImplementedError def size(self): """ Dimension dim(V) of the representation """ if self._size is not None: return self._size if self.concrete() and isinstance(self.G, Group): self._size = self.rho(self.G.sample()).size(-1) return self._size raise NotImplementedError def canonicalize(self): """ An optional method to convert the representation into a canonical form in order to reuse equivalent solutions in the solver. Should return both the canonically ordered representation, along with a permutation which can be applied to vectors of the current representation to achieve that ordering. """ # return canonicalized rep return self, torch.arange(self.size()) def rho_dense(self, M): """ A convenience function which returns rho(M) as a dense matrix."""
return densify(self.rho(M))
5
2023-11-01 07:19:02+00:00
12k
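LazyKron in the emlp context above applies a Kronecker product to a vector without materializing the dense matrix. A minimal torch sketch of the two-factor identity its _matmat exploits; the matrix sizes are arbitrary examples:

import torch

A, B = torch.randn(3, 3), torch.randn(4, 4)
v = torch.randn(3 * 4)

dense = torch.kron(A, B) @ v                    # forms the full 12x12 matrix
lazy = (A @ v.reshape(3, 4) @ B.T).reshape(-1)  # same result, no Kronecker product built

assert torch.allclose(dense, lazy, atol=1e-5)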
xenxxxx/BitPay-Crypto-Signal-Trading-Bot
tests/conftest.py
[ { "identifier": "leverage_trade", "path": "tests/conftest_trades.py", "snippet": "def leverage_trade(fee):\n \"\"\"\n 5 hour short limit trade on kraken\n\n Short trade\n fee: 0.25% base\n interest_rate: 0.05% per day\n open_rate: 0.123 base\n close_rate: 0.128 base\n amount: 615 crypto\n stake_amount: 15.129 base\n borrowed: 60.516 base\n leverage: 5\n hours: 5\n interest: borrowed * interest_rate * ceil(1 + hours/4)\n = 60.516 * 0.0005 * ceil(1 + 5/4) = 0.090774 base\n open_value: (amount * open_rate) + (amount * open_rate * fee)\n = (615.0 * 0.123) + (615.0 * 0.123 * 0.0025)\n = 75.83411249999999\n\n close_value: (amount_closed * close_rate) - (amount_closed * close_rate * fee) - interest\n = (615.0 * 0.128) - (615.0 * 0.128 * 0.0025) - 0.090774\n = 78.432426\n total_profit = close_value - open_value\n = 78.432426 - 75.83411249999999\n = 2.5983135000000175\n total_profit_percentage = ((close_value/open_value)-1) * leverage\n = ((78.432426/75.83411249999999)-1) * 5\n = 0.1713156134055116\n \"\"\"\n trade = Trade(\n pair='DOGE/BTC',\n stake_amount=15.129,\n amount=615.0,\n leverage=5.0,\n amount_requested=615.0,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n open_rate=0.123,\n close_rate=0.128,\n close_profit=0.1713156134055116,\n close_profit_abs=2.5983135000000175,\n exchange='kraken',\n is_open=False,\n strategy='DefaultStrategy',\n timeframe=5,\n exit_reason='sell_signal',\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=300),\n close_date=datetime.now(tz=timezone.utc),\n interest_rate=0.0005\n )\n o = Order.parse_from_ccxt_object(leverage_order(), 'DOGE/BTC', 'sell')\n trade.orders.append(o)\n o = Order.parse_from_ccxt_object(leverage_order_sell(), 'DOGE/BTC', 'sell')\n trade.orders.append(o)\n return trade" }, { "identifier": "mock_trade_1", "path": "tests/conftest_trades.py", "snippet": "def mock_trade_1(fee, is_short: bool):\n trade = Trade(\n pair='ETH/BTC',\n stake_amount=0.001,\n amount=123.0,\n amount_requested=123.0,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n is_open=True,\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=17),\n open_rate=0.123,\n exchange='binance',\n strategy='StrategyTestV3',\n timeframe=5,\n is_short=is_short\n )\n o = Order.parse_from_ccxt_object(mock_order_1(is_short), 'ETH/BTC', entry_side(is_short))\n trade.orders.append(o)\n return trade" }, { "identifier": "mock_trade_2", "path": "tests/conftest_trades.py", "snippet": "def mock_trade_2(fee, is_short: bool):\n \"\"\"\n Closed trade...\n \"\"\"\n trade = Trade(\n pair='ETC/BTC',\n stake_amount=0.001,\n amount=123.0,\n amount_requested=123.0,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n open_rate=0.123,\n close_rate=0.128,\n close_profit=-0.005 if is_short else 0.005,\n close_profit_abs=-0.005584127 if is_short else 0.000584127,\n exchange='binance',\n is_open=False,\n strategy='StrategyTestV3',\n timeframe=5,\n enter_tag='TEST1',\n exit_reason='sell_signal',\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=20),\n close_date=datetime.now(tz=timezone.utc) - timedelta(minutes=2),\n is_short=is_short\n )\n o = Order.parse_from_ccxt_object(mock_order_2(is_short), 'ETC/BTC', entry_side(is_short))\n trade.orders.append(o)\n o = Order.parse_from_ccxt_object(mock_order_2_sell(is_short), 'ETC/BTC', exit_side(is_short))\n trade.orders.append(o)\n return trade" }, { "identifier": "mock_trade_3", "path": "tests/conftest_trades.py", "snippet": "def mock_trade_3(fee, is_short: bool):\n \"\"\"\n Closed trade\n \"\"\"\n trade = 
Trade(\n pair='XRP/BTC',\n stake_amount=0.001,\n amount=123.0,\n amount_requested=123.0,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n open_rate=0.05,\n close_rate=0.06,\n close_profit=-0.01 if is_short else 0.01,\n close_profit_abs=-0.001155 if is_short else 0.000155,\n exchange='binance',\n is_open=False,\n strategy='StrategyTestV3',\n timeframe=5,\n exit_reason='roi',\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=20),\n close_date=datetime.now(tz=timezone.utc),\n is_short=is_short\n )\n o = Order.parse_from_ccxt_object(mock_order_3(is_short), 'XRP/BTC', entry_side(is_short))\n trade.orders.append(o)\n o = Order.parse_from_ccxt_object(mock_order_3_sell(is_short), 'XRP/BTC', exit_side(is_short))\n trade.orders.append(o)\n return trade" }, { "identifier": "mock_trade_4", "path": "tests/conftest_trades.py", "snippet": "def mock_trade_4(fee, is_short: bool):\n \"\"\"\n Simulate prod entry\n \"\"\"\n trade = Trade(\n pair='ETC/BTC',\n stake_amount=0.001,\n amount=123.0,\n amount_requested=124.0,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=14),\n is_open=True,\n open_rate=0.123,\n exchange='binance',\n strategy='StrategyTestV3',\n timeframe=5,\n is_short=is_short,\n stop_loss_pct=0.10\n )\n o = Order.parse_from_ccxt_object(mock_order_4(is_short), 'ETC/BTC', entry_side(is_short))\n trade.orders.append(o)\n return trade" }, { "identifier": "mock_trade_5", "path": "tests/conftest_trades.py", "snippet": "def mock_trade_5(fee, is_short: bool):\n \"\"\"\n Simulate prod entry with stoploss\n \"\"\"\n trade = Trade(\n pair='XRP/BTC',\n stake_amount=0.001,\n amount=123.0,\n amount_requested=124.0,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=12),\n is_open=True,\n open_rate=0.123,\n exchange='binance',\n strategy='SampleStrategy',\n enter_tag='TEST1',\n stoploss_order_id=f'prod_stoploss_{direc(is_short)}_3455',\n timeframe=5,\n is_short=is_short,\n stop_loss_pct=0.10,\n )\n o = Order.parse_from_ccxt_object(mock_order_5(is_short), 'XRP/BTC', entry_side(is_short))\n trade.orders.append(o)\n o = Order.parse_from_ccxt_object(mock_order_5_stoploss(is_short), 'XRP/BTC', 'stoploss')\n trade.orders.append(o)\n return trade" }, { "identifier": "mock_trade_6", "path": "tests/conftest_trades.py", "snippet": "def mock_trade_6(fee, is_short: bool):\n \"\"\"\n Simulate prod entry with open exit order\n \"\"\"\n trade = Trade(\n pair='LTC/BTC',\n stake_amount=0.001,\n amount=2.0,\n amount_requested=2.0,\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=5),\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n is_open=True,\n open_rate=0.15,\n exchange='binance',\n strategy='SampleStrategy',\n enter_tag='TEST2',\n timeframe=5,\n is_short=is_short\n )\n o = Order.parse_from_ccxt_object(mock_order_6(is_short), 'LTC/BTC', entry_side(is_short))\n trade.orders.append(o)\n o = Order.parse_from_ccxt_object(mock_order_6_sell(is_short), 'LTC/BTC', exit_side(is_short))\n trade.orders.append(o)\n return trade" }, { "identifier": "short_trade", "path": "tests/conftest_trades.py", "snippet": "def short_trade(fee):\n \"\"\"\n 10 minute short limit trade on binance\n\n Short trade\n fee: 0.25% base\n interest_rate: 0.05% per day\n open_rate: 0.123 base\n close_rate: 0.128 base\n amount: 123.0 crypto\n stake_amount: 15.129 base\n borrowed: 123.0 crypto\n time-periods: 10 minutes(rounds up to 1/24 time-period of 1 day)\n interest: 
borrowed * interest_rate * time-periods\n = 123.0 * 0.0005 * 1/24 = 0.0025625 crypto\n open_value: (amount * open_rate) - (amount * open_rate * fee)\n = (123 * 0.123) - (123 * 0.123 * 0.0025)\n = 15.091177499999999\n amount_closed: amount + interest = 123 + 0.0025625 = 123.0025625\n close_value: (amount_closed * close_rate) + (amount_closed * close_rate * fee)\n = (123.0025625 * 0.128) + (123.0025625 * 0.128 * 0.0025)\n = 15.78368882\n total_profit = open_value - close_value\n = 15.091177499999999 - 15.78368882\n = -0.6925113200000013\n total_profit_percentage = total_profit / stake_amount\n = -0.6925113200000013 / 15.129\n = -0.04577376693766946\n\n \"\"\"\n trade = Trade(\n pair='ETC/BTC',\n stake_amount=15.129,\n amount=123.0,\n amount_requested=123.0,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n open_rate=0.123,\n # close_rate=0.128,\n # close_profit=-0.04577376693766946,\n # close_profit_abs=-0.6925113200000013,\n exchange='binance',\n is_open=True,\n strategy='DefaultStrategy',\n timeframe=5,\n exit_reason='sell_signal',\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=20),\n # close_date=datetime.now(tz=timezone.utc) - timedelta(minutes=2),\n is_short=True\n )\n o = Order.parse_from_ccxt_object(short_order(), 'ETC/BTC', 'sell')\n trade.orders.append(o)\n o = Order.parse_from_ccxt_object(exit_short_order(), 'ETC/BTC', 'sell')\n trade.orders.append(o)\n return trade" }, { "identifier": "mock_trade_usdt_1", "path": "tests/conftest_trades_usdt.py", "snippet": "def mock_trade_usdt_1(fee, is_short: bool):\n \"\"\"\n Simulate prod entry with open sell order\n \"\"\"\n trade = Trade(\n pair='LTC/USDT',\n stake_amount=20.0,\n amount=2.0,\n amount_requested=2.0,\n open_date=datetime.now(tz=timezone.utc) - timedelta(days=2, minutes=20),\n close_date=datetime.now(tz=timezone.utc) - timedelta(days=2, minutes=5),\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n is_open=False,\n open_rate=10.0,\n close_rate=8.0,\n close_profit=-0.2,\n close_profit_abs=-4.09,\n exchange='binance',\n strategy='SampleStrategy',\n timeframe=5,\n is_short=is_short,\n )\n o = Order.parse_from_ccxt_object(mock_order_usdt_1(is_short), 'LTC/USDT', entry_side(is_short))\n trade.orders.append(o)\n o = Order.parse_from_ccxt_object(mock_order_usdt_1_exit(is_short),\n 'LTC/USDT', exit_side(is_short))\n trade.orders.append(o)\n return trade" }, { "identifier": "mock_trade_usdt_2", "path": "tests/conftest_trades_usdt.py", "snippet": "def mock_trade_usdt_2(fee, is_short: bool):\n \"\"\"\n Closed trade...\n \"\"\"\n trade = Trade(\n pair='NEO/USDT',\n stake_amount=200.0,\n amount=100.0,\n amount_requested=100.0,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n open_rate=2.0,\n close_rate=2.05,\n close_profit=0.05,\n close_profit_abs=3.9875,\n exchange='binance',\n is_open=False,\n strategy='StrategyTestV2',\n timeframe=5,\n enter_tag='TEST1',\n exit_reason='exit_signal',\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=20),\n close_date=datetime.now(tz=timezone.utc) - timedelta(minutes=2),\n is_short=is_short,\n )\n o = Order.parse_from_ccxt_object(mock_order_usdt_2(is_short), 'NEO/USDT', entry_side(is_short))\n trade.orders.append(o)\n o = Order.parse_from_ccxt_object(\n mock_order_usdt_2_exit(is_short), 'NEO/USDT', exit_side(is_short))\n trade.orders.append(o)\n return trade" }, { "identifier": "mock_trade_usdt_3", "path": "tests/conftest_trades_usdt.py", "snippet": "def mock_trade_usdt_3(fee, is_short: bool):\n \"\"\"\n Closed trade\n \"\"\"\n trade = Trade(\n 
pair='XRP/USDT',\n stake_amount=30.0,\n amount=30.0,\n amount_requested=30.0,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n open_rate=1.0,\n close_rate=1.1,\n close_profit=0.1,\n close_profit_abs=2.8425,\n exchange='binance',\n is_open=False,\n strategy='StrategyTestV2',\n timeframe=5,\n enter_tag='TEST3',\n exit_reason='roi',\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=20),\n close_date=datetime.now(tz=timezone.utc),\n is_short=is_short,\n )\n o = Order.parse_from_ccxt_object(mock_order_usdt_3(is_short), 'XRP/USDT', entry_side(is_short))\n trade.orders.append(o)\n o = Order.parse_from_ccxt_object(mock_order_usdt_3_exit(is_short),\n 'XRP/USDT', exit_side(is_short))\n trade.orders.append(o)\n return trade" }, { "identifier": "mock_trade_usdt_4", "path": "tests/conftest_trades_usdt.py", "snippet": "def mock_trade_usdt_4(fee, is_short: bool):\n \"\"\"\n Simulate prod entry\n \"\"\"\n trade = Trade(\n pair='NEO/USDT',\n stake_amount=20.0,\n amount=10.0,\n amount_requested=10.01,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=14),\n is_open=True,\n open_rate=2.0,\n exchange='binance',\n strategy='StrategyTestV2',\n timeframe=5,\n is_short=is_short,\n )\n o = Order.parse_from_ccxt_object(mock_order_usdt_4(is_short), 'NEO/USDT', entry_side(is_short))\n trade.orders.append(o)\n return trade" }, { "identifier": "mock_trade_usdt_5", "path": "tests/conftest_trades_usdt.py", "snippet": "def mock_trade_usdt_5(fee, is_short: bool):\n \"\"\"\n Simulate prod entry with stoploss\n \"\"\"\n trade = Trade(\n pair='XRP/USDT',\n stake_amount=20.0,\n amount=10.0,\n amount_requested=10.01,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=12),\n is_open=True,\n open_rate=2.0,\n exchange='binance',\n strategy='SampleStrategy',\n stoploss_order_id=f'prod_stoploss_3455_{direc(is_short)}',\n timeframe=5,\n is_short=is_short,\n )\n o = Order.parse_from_ccxt_object(mock_order_usdt_5(is_short), 'XRP/USDT', entry_side(is_short))\n trade.orders.append(o)\n o = Order.parse_from_ccxt_object(mock_order_usdt_5_stoploss(is_short), 'XRP/USDT', 'stoploss')\n trade.orders.append(o)\n return trade" }, { "identifier": "mock_trade_usdt_6", "path": "tests/conftest_trades_usdt.py", "snippet": "def mock_trade_usdt_6(fee, is_short: bool):\n \"\"\"\n Simulate prod entry with open sell order\n \"\"\"\n trade = Trade(\n pair='LTC/USDT',\n stake_amount=20.0,\n amount=2.0,\n amount_requested=2.0,\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=5),\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n is_open=True,\n open_rate=10.0,\n exchange='binance',\n strategy='SampleStrategy',\n timeframe=5,\n is_short=is_short,\n )\n o = Order.parse_from_ccxt_object(mock_order_usdt_6(is_short), 'LTC/USDT', entry_side(is_short))\n trade.orders.append(o)\n o = Order.parse_from_ccxt_object(mock_order_usdt_6_exit(is_short),\n 'LTC/USDT', exit_side(is_short))\n trade.orders.append(o)\n return trade" }, { "identifier": "mock_trade_usdt_7", "path": "tests/conftest_trades_usdt.py", "snippet": "def mock_trade_usdt_7(fee, is_short: bool):\n trade = Trade(\n pair='ADA/USDT',\n stake_amount=20.0,\n amount=10.0,\n amount_requested=10.0,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n is_open=True,\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=17),\n open_rate=2.0,\n exchange='binance',\n strategy='StrategyTestV2',\n timeframe=5,\n 
is_short=is_short,\n )\n o = Order.parse_from_ccxt_object(mock_order_usdt_7(is_short), 'ADA/USDT', entry_side(is_short))\n trade.orders.append(o)\n return trade" } ]
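The interest and profit figures quoted in the short_trade docstring above can be re-derived directly. A short plain-Python check, with every constant copied from that docstring:

amount, open_rate, close_rate, fee = 123.0, 0.123, 0.128, 0.0025
interest_rate, time_periods = 0.0005, 1 / 24                 # 10 minutes rounds up to 1/24 day

interest = amount * interest_rate * time_periods             # 0.0025625 crypto
open_value = amount * open_rate * (1 - fee)                  # 15.0911775
close_value = (amount + interest) * close_rate * (1 + fee)   # 15.78368882
total_profit = open_value - close_value                      # -0.69251132
total_profit_pct = total_profit / 15.129                     # -0.04577377 (stake_amount = 15.129)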
import json
import logging
import re
import numpy as np
import pandas as pd
import pytest
import builtins
from copy import deepcopy
from datetime import timedelta
from pathlib import Path
from typing import Optional
from unittest.mock import MagicMock, Mock, PropertyMock
from freqtrade import constants
from freqtrade.commands import Arguments
from freqtrade.data.converter import ohlcv_to_dataframe, trades_list_to_df
from freqtrade.edge import PairInfo
from freqtrade.enums import CandleType, MarginMode, RunMode, SignalDirection, TradingMode
from freqtrade.exchange import Exchange
from freqtrade.exchange.exchange import timeframe_to_minutes
from freqtrade.freqtradebot import FreqtradeBot
from freqtrade.persistence import LocalTrade, Order, Trade, init_db
from freqtrade.resolvers import ExchangeResolver
from freqtrade.util import dt_ts
from freqtrade.util.datetime_helpers import dt_now
from freqtrade.worker import Worker
from tests.conftest_trades import (leverage_trade, mock_trade_1, mock_trade_2, mock_trade_3,
                                   mock_trade_4, mock_trade_5, mock_trade_6, short_trade)
from tests.conftest_trades_usdt import (mock_trade_usdt_1, mock_trade_usdt_2, mock_trade_usdt_3,
                                        mock_trade_usdt_4, mock_trade_usdt_5, mock_trade_usdt_6,
                                        mock_trade_usdt_7)
7,370
) -> None: """ :param mocker: mocker to patch IStrategy class :return: None """ # returns (Signal-direction, signaname) def patched_get_entry_signal(*args, **kwargs): direction = None if enter_long and not any([exit_long, enter_short]): direction = SignalDirection.LONG if enter_short and not any([exit_short, enter_long]): direction = SignalDirection.SHORT return direction, enter_tag freqtrade.strategy.get_entry_signal = patched_get_entry_signal def patched_get_exit_signal(pair, timeframe, dataframe, is_short): if is_short: return enter_short, exit_short, exit_tag else: return enter_long, exit_long, exit_tag # returns (enter, exit) freqtrade.strategy.get_exit_signal = patched_get_exit_signal freqtrade.exchange.refresh_latest_ohlcv = lambda p: None def create_mock_trades(fee, is_short: Optional[bool] = False, use_db: bool = True): """ Create some fake trades ... :param is_short: Optional bool, None creates a mix of long and short trades. """ def add_trade(trade): if use_db: Trade.session.add(trade) else: LocalTrade.add_bt_trade(trade) is_short1 = is_short if is_short is not None else True is_short2 = is_short if is_short is not None else False # Simulate dry_run entries trade = mock_trade_1(fee, is_short1) add_trade(trade) trade = mock_trade_2(fee, is_short1) add_trade(trade) trade = mock_trade_3(fee, is_short2) add_trade(trade) trade = mock_trade_4(fee, is_short2) add_trade(trade) trade = mock_trade_5(fee, is_short2) add_trade(trade) trade = mock_trade_6(fee, is_short1) add_trade(trade) if use_db: Trade.commit() def create_mock_trades_with_leverage(fee, use_db: bool = True): """ Create some fake trades ... """ if use_db: Trade.session.rollback() def add_trade(trade): if use_db: Trade.session.add(trade) else: LocalTrade.add_bt_trade(trade) # Simulate dry_run entries trade = mock_trade_1(fee, False) add_trade(trade) trade = mock_trade_2(fee, False) add_trade(trade) trade = mock_trade_3(fee, False) add_trade(trade) trade = mock_trade_4(fee, False) add_trade(trade) trade = mock_trade_5(fee, False) add_trade(trade) trade = mock_trade_6(fee, False) add_trade(trade) trade = short_trade(fee) add_trade(trade) trade = leverage_trade(fee) add_trade(trade) if use_db: Trade.session.flush() def create_mock_trades_usdt(fee, is_short: Optional[bool] = False, use_db: bool = True): """ Create some fake trades ... """ def add_trade(trade): if use_db: Trade.session.add(trade) else: LocalTrade.add_bt_trade(trade) is_short1 = is_short if is_short is not None else True is_short2 = is_short if is_short is not None else False # Simulate dry_run entries
# pragma pylint: disable=missing-docstring logging.getLogger('').setLevel(logging.INFO) # Do not mask numpy errors as warnings that no one read, raise the exсeption np.seterr(all='raise') CURRENT_TEST_STRATEGY = 'StrategyTestV3' TRADE_SIDES = ('long', 'short') EXMS = 'freqtrade.exchange.exchange.Exchange' def pytest_addoption(parser): parser.addoption('--longrun', action='store_true', dest="longrun", default=False, help="Enable long-run tests (ccxt compat)") def pytest_configure(config): config.addinivalue_line( "markers", "longrun: mark test that is running slowly and should not be run regularily" ) if not config.option.longrun: setattr(config.option, 'markexpr', 'not longrun') def log_has(line, logs): """Check if line is found on some caplog's message.""" return any(line == message for message in logs.messages) def log_has_when(line, logs, when): """Check if line is found in caplog's messages during a specified stage""" return any(line == message.message for message in logs.get_records(when)) def log_has_re(line, logs): """Check if line matches some caplog's message.""" return any(re.match(line, message) for message in logs.messages) def num_log_has(line, logs): """Check how many times line is found in caplog's messages.""" return sum(line == message for message in logs.messages) def num_log_has_re(line, logs): """Check how many times line matches caplog's messages.""" return sum(bool(re.match(line, message)) for message in logs.messages) def get_args(args): return Arguments(args).get_parsed_arg() def generate_test_data(timeframe: str, size: int, start: str = '2020-07-05'): np.random.seed(42) tf_mins = timeframe_to_minutes(timeframe) base = np.random.normal(20, 2, size=size) date = pd.date_range(start, periods=size, freq=f'{tf_mins}min', tz='UTC') df = pd.DataFrame({ 'date': date, 'open': base, 'high': base + np.random.normal(2, 1, size=size), 'low': base - np.random.normal(2, 1, size=size), 'close': base + np.random.normal(0, 1, size=size), 'volume': np.random.normal(200, size=size) } ) df = df.dropna() return df def generate_test_data_raw(timeframe: str, size: int, start: str = '2020-07-05'): """ Generates data in the ohlcv format used by ccxt """ df = generate_test_data(timeframe, size, start) df['date'] = df.loc[:, 'date'].view(np.int64) // 1000 // 1000 return list(list(x) for x in zip(*(df[x].values.tolist() for x in df.columns))) # Source: https://stackoverflow.com/questions/29881236/how-to-mock-asyncio-coroutines # TODO: This should be replaced with AsyncMock once support for python 3.7 is dropped. 
def get_mock_coro(return_value=None, side_effect=None): async def mock_coro(*args, **kwargs): if side_effect: if isinstance(side_effect, list): effect = side_effect.pop(0) else: effect = side_effect if isinstance(effect, Exception): raise effect if callable(effect): return effect(*args, **kwargs) return effect else: return return_value return Mock(wraps=mock_coro) def patched_configuration_load_config_file(mocker, config) -> None: mocker.patch( 'freqtrade.configuration.load_config.load_config_file', lambda *args, **kwargs: config ) def patch_exchange( mocker, api_mock=None, id='binance', mock_markets=True, mock_supported_modes=True ) -> None: mocker.patch(f'{EXMS}._load_async_markets', return_value={}) mocker.patch(f'{EXMS}.validate_config', MagicMock()) mocker.patch(f'{EXMS}.validate_timeframes', MagicMock()) mocker.patch(f'{EXMS}.id', PropertyMock(return_value=id)) mocker.patch(f'{EXMS}.name', PropertyMock(return_value=id.title())) mocker.patch(f'{EXMS}.precisionMode', PropertyMock(return_value=2)) if mock_markets: if isinstance(mock_markets, bool): mock_markets = get_markets() mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=mock_markets)) if mock_supported_modes: mocker.patch( f'freqtrade.exchange.{id}.{id.capitalize()}._supported_trading_mode_margin_pairs', PropertyMock(return_value=[ (TradingMode.MARGIN, MarginMode.CROSS), (TradingMode.MARGIN, MarginMode.ISOLATED), (TradingMode.FUTURES, MarginMode.CROSS), (TradingMode.FUTURES, MarginMode.ISOLATED) ]) ) if api_mock: mocker.patch(f'{EXMS}._init_ccxt', return_value=api_mock) else: mocker.patch(f'{EXMS}._init_ccxt', MagicMock()) mocker.patch(f'{EXMS}.timeframes', PropertyMock( return_value=['5m', '15m', '1h', '1d'])) def get_patched_exchange(mocker, config, api_mock=None, id='binance', mock_markets=True, mock_supported_modes=True) -> Exchange: patch_exchange(mocker, api_mock, id, mock_markets, mock_supported_modes) config['exchange']['name'] = id try: exchange = ExchangeResolver.load_exchange(config, load_leverage_tiers=True) except ImportError: exchange = Exchange(config) return exchange def patch_wallet(mocker, free=999.9) -> None: mocker.patch('freqtrade.wallets.Wallets.get_free', MagicMock( return_value=free )) def patch_whitelist(mocker, conf) -> None: mocker.patch('freqtrade.freqtradebot.FreqtradeBot._refresh_active_whitelist', MagicMock(return_value=conf['exchange']['pair_whitelist'])) def patch_edge(mocker) -> None: # "ETH/BTC", # "LTC/BTC", # "XRP/BTC", # "NEO/BTC" mocker.patch('freqtrade.edge.Edge._cached_pairs', mocker.PropertyMock( return_value={ 'NEO/BTC': PairInfo(-0.20, 0.66, 3.71, 0.50, 1.71, 10, 25), 'LTC/BTC': PairInfo(-0.21, 0.66, 3.71, 0.50, 1.71, 11, 20), } )) mocker.patch('freqtrade.edge.Edge.calculate', MagicMock(return_value=True)) # Functions for recurrent object patching def patch_freqtradebot(mocker, config) -> None: """ This function patch _init_modules() to not call dependencies :param mocker: a Mocker object to apply patches :param config: Config to pass to the bot :return: None """ mocker.patch('freqtrade.freqtradebot.RPCManager', MagicMock()) patch_exchange(mocker) mocker.patch('freqtrade.freqtradebot.RPCManager._init', MagicMock()) mocker.patch('freqtrade.freqtradebot.RPCManager.send_msg', MagicMock()) patch_whitelist(mocker, config) mocker.patch('freqtrade.freqtradebot.ExternalMessageConsumer') mocker.patch('freqtrade.configuration.config_validation._validate_consumers') def get_patched_freqtradebot(mocker, config) -> FreqtradeBot: """ This function patches _init_modules() to not call 
dependencies :param mocker: a Mocker object to apply patches :param config: Config to pass to the bot :return: FreqtradeBot """ patch_freqtradebot(mocker, config) return FreqtradeBot(config) def get_patched_worker(mocker, config) -> Worker: """ This function patches _init_modules() to not call dependencies :param mocker: a Mocker object to apply patches :param config: Config to pass to the bot :return: Worker """ patch_freqtradebot(mocker, config) return Worker(args=None, config=config) def patch_get_signal( freqtrade: FreqtradeBot, enter_long=True, exit_long=False, enter_short=False, exit_short=False, enter_tag: Optional[str] = None, exit_tag: Optional[str] = None, ) -> None: """ :param mocker: mocker to patch IStrategy class :return: None """ # returns (Signal-direction, signaname) def patched_get_entry_signal(*args, **kwargs): direction = None if enter_long and not any([exit_long, enter_short]): direction = SignalDirection.LONG if enter_short and not any([exit_short, enter_long]): direction = SignalDirection.SHORT return direction, enter_tag freqtrade.strategy.get_entry_signal = patched_get_entry_signal def patched_get_exit_signal(pair, timeframe, dataframe, is_short): if is_short: return enter_short, exit_short, exit_tag else: return enter_long, exit_long, exit_tag # returns (enter, exit) freqtrade.strategy.get_exit_signal = patched_get_exit_signal freqtrade.exchange.refresh_latest_ohlcv = lambda p: None def create_mock_trades(fee, is_short: Optional[bool] = False, use_db: bool = True): """ Create some fake trades ... :param is_short: Optional bool, None creates a mix of long and short trades. """ def add_trade(trade): if use_db: Trade.session.add(trade) else: LocalTrade.add_bt_trade(trade) is_short1 = is_short if is_short is not None else True is_short2 = is_short if is_short is not None else False # Simulate dry_run entries trade = mock_trade_1(fee, is_short1) add_trade(trade) trade = mock_trade_2(fee, is_short1) add_trade(trade) trade = mock_trade_3(fee, is_short2) add_trade(trade) trade = mock_trade_4(fee, is_short2) add_trade(trade) trade = mock_trade_5(fee, is_short2) add_trade(trade) trade = mock_trade_6(fee, is_short1) add_trade(trade) if use_db: Trade.commit() def create_mock_trades_with_leverage(fee, use_db: bool = True): """ Create some fake trades ... """ if use_db: Trade.session.rollback() def add_trade(trade): if use_db: Trade.session.add(trade) else: LocalTrade.add_bt_trade(trade) # Simulate dry_run entries trade = mock_trade_1(fee, False) add_trade(trade) trade = mock_trade_2(fee, False) add_trade(trade) trade = mock_trade_3(fee, False) add_trade(trade) trade = mock_trade_4(fee, False) add_trade(trade) trade = mock_trade_5(fee, False) add_trade(trade) trade = mock_trade_6(fee, False) add_trade(trade) trade = short_trade(fee) add_trade(trade) trade = leverage_trade(fee) add_trade(trade) if use_db: Trade.session.flush() def create_mock_trades_usdt(fee, is_short: Optional[bool] = False, use_db: bool = True): """ Create some fake trades ... """ def add_trade(trade): if use_db: Trade.session.add(trade) else: LocalTrade.add_bt_trade(trade) is_short1 = is_short if is_short is not None else True is_short2 = is_short if is_short is not None else False # Simulate dry_run entries
trade = mock_trade_usdt_1(fee, is_short1)
8
2023-11-07 18:46:03+00:00
12k
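The freqtrade conftest record above defines get_mock_coro, which wraps a plain coroutine in a unittest.mock.Mock so that async calls can be stubbed and asserted on from synchronous test code. The following minimal sketch shows that pattern in isolation; the Client class, its fetch method, and the return payload are hypothetical placeholders rather than freqtrade APIs, and the helper is reproduced here in a simplified form (no side_effect handling).

import asyncio
from unittest.mock import Mock


def get_mock_coro(return_value=None):
    # Simplified form of the conftest helper: the returned Mock records each
    # call, and calling it yields a coroutine that resolves to return_value.
    async def mock_coro(*args, **kwargs):
        return return_value

    return Mock(wraps=mock_coro)


class Client:
    # Hypothetical stand-in for an object whose method would hit the network.
    async def fetch(self, pair):
        raise NotImplementedError


def test_fetch_is_stubbed():
    client = Client()
    client.fetch = get_mock_coro(return_value={"last": 42000.0})
    result = asyncio.run(client.fetch("BTC/USDT"))
    assert result == {"last": 42000.0}
    assert client.fetch.call_count == 1

The full helper in the record additionally accepts a side_effect that may be an exception, a callable, or a list consumed one element per call.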
awslabs/optimizing-multitask-training-through-dynamic-pipelines
scripts/simulation/compare_batching_methods.py
[ { "identifier": "ProfileBasedCostModelWithRC", "path": "dynapipe/data_opt/cost_models.py", "snippet": "class ProfileBasedCostModelWithRC(object):\n \"\"\"\n Wrapper class for multiple ProfileBasedCostModel objects, one for each\n tensor parallel degree and recomputation method.\n \"\"\"\n\n def __init__(\n self,\n profile_paths=None,\n _serialized_cms: Optional[Dict[Tuple[int, str], bytes]] = None,\n ) -> None:\n self.cost_models: dict[str, ProfileBasedCostModel] = {}\n if _serialized_cms is not None:\n for cm_key, serialized_cm in _serialized_cms.items():\n self.cost_models[cm_key] = ProfileBasedCostModel.deserialize(\n serialized_cm\n )\n return\n if not isinstance(profile_paths, list):\n # profile_paths is a dir\n assert os.path.isdir(profile_paths), (\n f\"Profile path {profile_paths} is not a directory \"\n \"or list of paths\"\n )\n profile_paths = [\n os.path.join(profile_paths, x)\n for x in os.listdir(profile_paths)\n if x.startswith(\"microbench\") and x.endswith(\"txt\")\n ]\n # separate paths by cost model key (tp_size, rc_type)\n self.per_key_profile_paths = defaultdict(list)\n for path in profile_paths:\n cm_key = self._parse_cm_key(path)\n self.per_key_profile_paths[cm_key].append(path)\n for cm_key, paths in self.per_key_profile_paths.items():\n self.cost_models[cm_key] = ProfileBasedCostModel(paths)\n\n def _parse_cm_key(self, filename):\n basename = os.path.basename(filename)\n if \"rc_full_uniform\" in basename:\n rc_type = \"full\"\n elif \"rc_selective\" in basename:\n rc_type = \"selective\"\n else:\n rc_type = \"none\"\n tp_size = int(basename.split(\"_\")[1][2:])\n return tp_size, rc_type\n\n def _check_valid_cm_key(self, cm_key):\n assert (\n cm_key in self.cost_models\n ), f\"Key {cm_key} not recorded in profile.\"\n\n def is_valid_stage(self, tp_size, rc_type, stage):\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].is_valid_stage(stage)\n\n def valid_stages(self, tp_size, rc_type):\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].valid_stages()\n\n def supported_sequence_lengths(self, tp_size, rc_type, stage):\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].supported_sequence_lengths(\n stage\n )\n\n def get_cost(\n self,\n tp_size,\n rc_type,\n stage,\n seq_len,\n mbs,\n ):\n \"\"\"Select the corresponding cost model based on TP degree and\n recomputation type and get the computation cost.\n \"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].get_cost(\n stage, seq_len, mbs\n )\n\n def get_stored_activation(self, tp_size, rc_type, stage, seq_len, mbs):\n \"\"\"Select the corresponding cost model based on TP degree and\n recomputation type and get the stored activation.\n \"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].get_stored_activation(\n stage, seq_len, mbs\n )\n\n def get_peak_activation(self, tp_size, rc_type, stage, seq_len, mbs):\n \"\"\"Select the corresponding cost model based on TP degree and\n recomputation type and get the peak activation.\n \"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].get_peak_activation(\n stage, seq_len, mbs\n )\n\n def get_model_state(\n self,\n tp_size,\n rc_type,\n stage,\n n_shards=1,\n zero_stage=0,\n param_factor=None,\n ):\n \"\"\"Select the corresponding cost model based on TP degree and\n recomputation type and get the model state.\n 
\"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].get_model_state(\n stage,\n n_shards=n_shards,\n zero_stage=zero_stage,\n param_factor=param_factor,\n )\n\n def get_raw_cost_model(self, tp_size, rc_type):\n \"\"\"Get the raw cost model for the given TP degree and recomputation\n type.\n \"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)]\n\n def save(self, path):\n serialized_dict = {}\n for cm_key, cost_model in self.cost_models.items():\n serialized_dict[cm_key] = cost_model.serialize()\n with open(path, \"wb\") as f:\n pickle.dump(serialized_dict, f)\n\n @classmethod\n def load(cls, path):\n with open(path, \"rb\") as f:\n serialized_dict = pickle.load(f)\n return cls(_serialized_cms=serialized_dict)" }, { "identifier": "DataAssignmentOptimizer", "path": "dynapipe/data_opt/optimizer.py", "snippet": "class DataAssignmentOptimizer(object):\n \"\"\"Data assignment optimizer.\n\n Optimizes the assignment of a mini-batch of data into micro-batches.\n \"\"\"\n\n def __init__(\n self,\n cost_model: ProfileBasedCostModelWithRC,\n model_spec: TransformerModelSpec,\n n_executors: int,\n n_layers_per_stage: int,\n n_chunks_per_device: int = 1,\n dp_size: int = 1,\n tp_size: int = 1,\n zero_stage: int = 0,\n device_memory_limit: float = float(\"inf\"),\n round_seqlen_multiple=8,\n per_mb_memory_fraction=None,\n len_pack_sep_tokens=1,\n len_decoder_additional_tokens=2,\n seqlen_offset=0,\n ):\n \"\"\"Optimizer for assigning data samples into micro-batches.\n cost_model: cost model for the model used\n model_spec: model specification\n n_executors: number of stages of the pipelined model\n n_layers_per_stage: number of layers per each pipeline stage\n n_chunks_per_device: number of chunks per device\n (> 1 indicating interleaved schedule)\n dp_size: data parallelism degree\n tp_size: tensor parallelism degree\n zero_stage: stage of ZeRO optimizer\n device_memory_limit: memory limit in MB (MegaBytes)\n round_seqlen_multiple: always round sequence length to multiple of\n this number, required for some kernels\n default: 8\n len_pack_sep_tokens: number of tokens used to separate samples in the\n packed sequence, only used when enable_packing\n is True during optimization.\n len_decoder_additional_tokens: number of additional tokens added to\n the decoder sequence length other than\n the target sequence, e.g. 
<bos>, <eos>\n seqlen_offset: should be set 1 for decoder only models, whose input\n and target sequences are data sequence length - 1\n 0 for encoder-decoder models.\n \"\"\"\n self.cost_model = cost_model\n self.n_executors = n_executors\n self.n_layers_per_stage = n_layers_per_stage\n # create memory model\n self.model_spec = model_spec\n self.memory_limit = device_memory_limit\n self.dp_size = dp_size\n self.tp_size = tp_size\n self.zero_stage = zero_stage\n self.round_seqlen_multiple = round_seqlen_multiple\n self.len_pack_sep_tokens = len_pack_sep_tokens\n self.len_decoder_additional_tokens = len_decoder_additional_tokens\n self.n_chunks_per_device = n_chunks_per_device\n self.per_mb_memory_fraction = per_mb_memory_fraction\n self.seqlen_offset = seqlen_offset\n\n def _round_seqlen(self, seqlen, decoder=False):\n if decoder:\n seqlen += self.len_decoder_additional_tokens\n seqlen -= self.seqlen_offset\n return (\n (seqlen + self.round_seqlen_multiple - 1)\n // self.round_seqlen_multiple\n * self.round_seqlen_multiple\n + self.seqlen_offset\n )\n\n def _solve_sample_order_tsp_problem(\n self,\n sample_sequence_lengths,\n decoder_sample_sequence_lengths,\n bottleneck_tsp=True,\n dist_function=\"sum\",\n use_clustering=True,\n distance_threshold=16,\n ):\n \"\"\"Solve the TSP problem to determine the sample order.\"\"\"\n if dist_function == \"sum\":\n\n def _f_dist(x, y):\n return abs(int(x[0]) - int(y[0])) + abs(int(x[1]) - int(y[1]))\n\n elif dist_function == \"max\":\n\n def _f_dist(x, y):\n return max(\n abs(int(x[0]) - int(y[0])), abs(int(x[1]) - int(y[1]))\n )\n\n elif dist_function == \"square\":\n\n def _f_dist(x, y):\n return (int(x[0]) - int(y[0])) ** 2 + (\n int(x[1]) - int(y[1])\n ) ** 2\n\n else:\n raise ValueError(\n \"Unknown distance function: {}\".format(dist_function)\n )\n\n def _get_distance_matrix(points):\n # add a dummy point at the beginning\n # to transform it into an open TSP problem\n distance_matrix = [[0] * (len(points) + 1)]\n for x in points:\n row = [0]\n for y in points:\n row.append(_f_dist(x, y))\n distance_matrix.append(row)\n return distance_matrix\n\n input_points = list(\n zip(sample_sequence_lengths, decoder_sample_sequence_lengths)\n )\n if use_clustering:\n vectors_np = np.array(input_points)\n clustering = AgglomerativeClustering(\n n_clusters=None,\n distance_threshold=distance_threshold,\n linkage=\"complete\",\n ).fit(vectors_np)\n labels = clustering.labels_\n n_clusters = max(labels) + 1\n cluster_to_samples = [[] for _ in range(n_clusters)]\n cluster_to_data = [[] for _ in range(n_clusters)]\n for sample_idx, label in enumerate(labels):\n cluster_to_samples[label].append(sample_idx)\n cluster_to_data[label].append(input_points[sample_idx])\n # compute cluster centroids\n cluster_to_center = [None] * n_clusters\n for cluster_label, data in enumerate(cluster_to_data):\n cluster_to_center[cluster_label] = tuple(np.mean(data, axis=0))\n # compute tsp for cluster centroids\n distance_matrix = np.array(_get_distance_matrix(cluster_to_center))\n permutation = list(\n np.array(\n elkai.solve_int_matrix(\n distance_matrix, 1, bottleneck=bottleneck_tsp\n )\n )\n - 1\n )[1:]\n # reconstruct orig order\n result = []\n for cluster_label in permutation:\n result += cluster_to_samples[cluster_label]\n # sanity check result is a valid permutation\n assert sorted(result) == list(range(len(result)))\n return result\n\n distance_matrix = np.array(_get_distance_matrix(input_points))\n permutation = list(\n np.array(\n elkai.solve_int_matrix(\n 
distance_matrix, 1, bottleneck=bottleneck_tsp\n )\n )\n - 1\n )[1:]\n return permutation\n\n def _pack(\n self,\n sequence: list,\n current_enc_length,\n current_dec_length,\n target_enc_length,\n target_dec_length,\n next_idx,\n samples_with_ids,\n consumed,\n ):\n for j in range(next_idx, len(samples_with_ids)):\n if consumed[j]:\n continue\n (\n seqlen_to_pack,\n dec_seqlen_to_pack,\n sample_id_to_pack,\n ) = samples_with_ids[j]\n if (\n current_enc_length + seqlen_to_pack <= target_enc_length\n and current_dec_length + dec_seqlen_to_pack\n <= target_dec_length\n ):\n sequence.append(sample_id_to_pack)\n current_enc_length += seqlen_to_pack\n current_dec_length += dec_seqlen_to_pack\n consumed[j] = True\n return current_enc_length, current_dec_length\n\n def _uniform_partition(self, samples_with_ids, microbatch_size):\n max_sequence_length = max([x[0] for x in samples_with_ids])\n max_decoder_sequence_length = max([x[1] for x in samples_with_ids])\n\n # round sequence length to multiple of round_seqlen_multiple\n max_sequence_length = self._round_seqlen(max_sequence_length)\n max_decoder_sequence_length = self._round_seqlen(\n max_decoder_sequence_length, decoder=True\n )\n # pack all sequences into fixed sequence length\n target_src_seqlen = max_sequence_length\n target_tgt_seqlen = (\n max_decoder_sequence_length - self.len_decoder_additional_tokens\n )\n consumed = [False] * len(samples_with_ids)\n sequences = []\n for seqlen, dec_seqlen, idx in samples_with_ids:\n if consumed[idx]:\n continue\n curr_sequence = []\n curr_sequence_seqlen = seqlen\n curr_sequence_dec_seqlen = dec_seqlen\n curr_sequence.append(idx)\n curr_sequence_seqlen, curr_sequence_dec_seqlen = self._pack(\n curr_sequence,\n curr_sequence_seqlen,\n curr_sequence_dec_seqlen,\n target_src_seqlen,\n target_tgt_seqlen,\n idx + 1,\n samples_with_ids,\n consumed,\n )\n sequences.append(curr_sequence)\n consumed[idx] = True\n # divide sequences into microbatches\n microbatches = []\n for i in range(0, len(sequences), microbatch_size):\n microbatches.append(sequences[i : i + microbatch_size])\n return microbatches\n\n def _token_based_partition(self, samples_with_ids, microbatch_tokens):\n microbatches = []\n current_microbatch_tokens = 0\n current_microbatch = []\n for seqlen, dec_seqlen, idx in samples_with_ids:\n rounded_seqlen = self._round_seqlen(seqlen)\n rounded_dec_seqlen = self._round_seqlen(dec_seqlen, decoder=True)\n if (\n current_microbatch_tokens + rounded_seqlen + rounded_dec_seqlen\n > microbatch_tokens\n ):\n if len(current_microbatch) > 0:\n microbatches.append(current_microbatch.copy())\n current_microbatch = []\n current_microbatch_tokens = 0\n current_microbatch.append([idx])\n current_microbatch_tokens += seqlen + dec_seqlen\n if len(current_microbatch) > 0:\n microbatches.append(current_microbatch)\n return microbatches\n\n def _subset_partition(self, micro_batch_costs):\n # partition the microbatches into subsets\n # create a mapping from microbatch index to its cost\n mb_cost_map = {}\n for i, mb in enumerate(micro_batch_costs):\n mb_cost_map[i] = mb\n return prtpy.partition(\n algorithm=prtpy.partitioning.kk,\n numbins=self.dp_size,\n items=mb_cost_map,\n )\n\n def generate_microbatches(\n self,\n sample_sequence_lengths,\n available_rc_types=None,\n decoder_sample_sequence_lengths=None,\n disable_tsp=False,\n bottleneck_tsp=False,\n tsp_dist_function=\"sum\",\n tsp_use_clustering=True,\n tsp_cluster_distance_threshold=16,\n partition_method=\"dp\",\n uniform_partition_batch_size=None,\n 
token_based_partition_mb_tokens=None,\n enable_packing=False,\n ):\n if available_rc_types is None:\n available_rc_types = [\"none\", \"selective\", \"full\"]\n if (\n self.n_chunks_per_device > 1\n and decoder_sample_sequence_lengths is None\n ):\n raise ValueError(\n \"Interleaved schedule with non-encoder-decoder models \"\n \"are not supported yet.\"\n )\n # stage 1: determine the order of samples\n if decoder_sample_sequence_lengths is None:\n samples_with_ids = [\n (seqlen, 0, i)\n for i, seqlen in enumerate(sample_sequence_lengths)\n ]\n # single sequence, sorting suffices\n samples_with_ids.sort(reverse=True)\n else:\n if partition_method == \"uniform\":\n assert uniform_partition_batch_size is not None, (\n \"uniform_partition_batch_size must be specified \"\n \"when partition_method is 'uniform'\"\n )\n # uniform partitioning, don't need to solve TSP\n samples_with_ids = [\n (seqlen, dec_seqlen, i)\n for i, (seqlen, dec_seqlen) in enumerate(\n zip(\n sample_sequence_lengths,\n decoder_sample_sequence_lengths,\n )\n )\n ]\n else:\n # multiple sequences, use TSP or 2 level sorting\n # to find the optimal order\n if disable_tsp:\n samples_with_ids = [\n (seqlen, dec_seqlen, i)\n for i, (seqlen, dec_seqlen) in enumerate(\n zip(\n sample_sequence_lengths,\n decoder_sample_sequence_lengths,\n )\n )\n ]\n # sort first by encoder sequence length, then by decoder\n samples_with_ids.sort(reverse=True)\n else:\n permutation = self._solve_sample_order_tsp_problem(\n sample_sequence_lengths,\n decoder_sample_sequence_lengths,\n bottleneck_tsp=bottleneck_tsp,\n dist_function=tsp_dist_function,\n use_clustering=tsp_use_clustering,\n distance_threshold=tsp_cluster_distance_threshold,\n )\n samples_with_ids = [\n (\n sample_sequence_lengths[i],\n decoder_sample_sequence_lengths[i],\n int(i),\n )\n for i in permutation\n ]\n # stage 2: splitting and packing\n # we first calculate the model states memory and subtract it\n # from the memory limit\n # We assume that GPU0 is the bottleneck GPU, which holds Embedding\n # and Encoder of the model if not interleaved, and holds Embedding,\n # Encoder and Decoder of the model if interleaved.\n # rc_type doesn't matter here\n model_states_memory = self.cost_model.get_model_state(\n self.tp_size,\n \"none\",\n \"Embedding\",\n n_shards=self.dp_size,\n zero_stage=self.zero_stage,\n )\n encoder_model_state = self.cost_model.get_model_state(\n self.tp_size,\n \"none\",\n \"Encoder\",\n n_shards=self.dp_size,\n zero_stage=self.zero_stage,\n )\n if decoder_sample_sequence_lengths is not None:\n decoder_model_state = self.cost_model.get_model_state(\n self.tp_size,\n \"none\",\n \"Decoder\",\n n_shards=self.dp_size,\n zero_stage=self.zero_stage,\n )\n else:\n decoder_model_state = 0\n if self.n_chunks_per_device == 1:\n # not interleaved\n layer_states = max(encoder_model_state, decoder_model_state)\n else:\n # interleaved\n layer_states = encoder_model_state + decoder_model_state\n layer_states = layer_states * self.n_chunks_per_device / 2\n layer_states *= self.n_layers_per_stage\n model_states_memory += layer_states\n available_memory = self.memory_limit - model_states_memory\n\n if (\n self.per_mb_memory_fraction is not None\n and self.per_mb_memory_fraction > 0\n ):\n preferred_memory_limit = (\n self.per_mb_memory_fraction * available_memory\n )\n else:\n preferred_memory_limit = available_memory / self.n_executors\n for memory_type, memory_limit in [\n (\"preferred\", preferred_memory_limit),\n (\"available\", available_memory),\n ]:\n # first try to find a 
partition that do not need special schedule\n # if not found, only make sure that each single microbatch\n # fits in memory\n for rc_type in available_rc_types:\n if partition_method == \"dp\":\n # use dynamic programming to find optimal\n # sequential partition\n (\n objective_value,\n microbatches,\n microbatch_costs,\n ) = cpp_consecutive_partition_dp(\n self.cost_model.get_raw_cost_model(\n self.tp_size, rc_type\n ),\n self.n_executors,\n self.n_chunks_per_device,\n self.n_layers_per_stage,\n self.dp_size,\n memory_limit,\n available_memory,\n samples_with_ids,\n enable_packing=enable_packing,\n round_seqlen_multiple=self.round_seqlen_multiple,\n len_pack_sep_tokens=self.len_pack_sep_tokens,\n len_decoder_additional_tokens=self.len_decoder_additional_tokens, # noqa\n )\n elif partition_method == \"token_based\":\n assert token_based_partition_mb_tokens is not None, (\n \"token_based_partition_mb_tokens must be specified \"\n \"when partition_method is 'token_based'\"\n )\n # token based partitioning\n microbatches = self._token_based_partition(\n samples_with_ids, token_based_partition_mb_tokens\n )\n # dummy objective value, not used\n objective_value = (\n 0,\n 0,\n 0,\n [0] * len(microbatches),\n [0] * len(microbatches),\n )\n # dummy microbatch costs\n microbatch_costs = [0] * len(microbatches)\n elif partition_method == \"uniform\":\n microbatches = self._uniform_partition(\n samples_with_ids, uniform_partition_batch_size\n )\n # dummy objective value, not used\n objective_value = (\n 0,\n 0,\n 0,\n [0] * len(microbatches),\n [0] * len(microbatches),\n )\n # dummy microbatch costs\n microbatch_costs = [0] * len(microbatches)\n else:\n raise ValueError(\n \"unknown partition method: {}\".format(partition_method)\n )\n if math.isinf(objective_value[0]) or math.isnan(\n objective_value[0]\n ):\n # memory limit is too small\n continue\n # sanity check microbatches:\n # make sure that each index appears once and only once\n all_indices = set()\n for mb in microbatches:\n for sample in mb:\n for index in sample:\n assert (\n index not in all_indices\n ), \"index {} appears more than once\".format(index)\n all_indices.add(index)\n assert sorted(list(all_indices)) == list(\n range(len(samples_with_ids))\n ), (\n \"not all indices appear in microbatches: \"\n \"{} v.s. {}. 
Input seqlens: {}, target seqlens: {}\".format(\n len(all_indices),\n len(samples_with_ids),\n sample_sequence_lengths,\n decoder_sample_sequence_lengths,\n )\n )\n # partition microbatches into subsets, each for one data\n # parallel group\n if self.dp_size > 1:\n partitioned_microbatch_ids = self._subset_partition(\n microbatch_costs\n )\n partitioned_microbatches = []\n for mb_ids in partitioned_microbatch_ids:\n partitioned_microbatches.append(\n [microbatches[i] for i in sorted(mb_ids)]\n )\n else:\n partitioned_microbatches = [microbatches]\n return (\n objective_value,\n partitioned_microbatches,\n memory_type,\n rc_type,\n (available_memory, model_states_memory, memory_limit),\n )\n # no feasible microbatch split found\n return None, None, None, None, None" }, { "identifier": "TransformerModelSpec", "path": "dynapipe/model.py", "snippet": "class TransformerModelSpec:\n # Default setting:\n # * mlp_hidden_size = 4x hidden_dim\n # * kv_channels = hidden_dim // num_attn_heads\n # * use FP16 mixed precision training with Adam optimizer.\n n_encoder_layers: int\n n_decoder_layers: int\n hidden_dim: int\n num_attn_heads: int\n mlp_hidden_dim: Union[None, int] = None\n kv_channels: Union[None, int] = None\n bytes_per_element: int = 2\n optimizer_state_multiplier: int = 12\n\n def __post_init__(self):\n if self.mlp_hidden_dim is None:\n # if not specified, use the 4x hidden dim as it is the norm\n self.mlp_hidden_dim = self.hidden_dim * 4\n if self.kv_channels is None:\n # if not specified, use the hidden_dim // num_attn_heads\n assert self.hidden_dim % self.num_attn_heads == 0\n self.kv_channels = self.hidden_dim // self.num_attn_heads\n\n def serialize(self) -> bytes:\n def _serialize_int(x: int):\n return x.to_bytes(4, \"little\")\n\n return b\"\".join(\n [\n _serialize_int(x)\n for x in [\n self.n_encoder_layers,\n self.n_decoder_layers,\n self.hidden_dim,\n self.num_attn_heads,\n self.mlp_hidden_dim,\n self.kv_channels,\n self.bytes_per_element,\n self.optimizer_state_multiplier,\n ]\n ]\n )\n\n @classmethod\n def deserialize(cls, data: bytes):\n def _deserialize_int(data: bytes):\n return int.from_bytes(data, \"little\")\n\n return cls(\n *[_deserialize_int(data[i * 4 : (i + 1) * 4]) for i in range(8)]\n )" } ]
import argparse import math import jsonlines import numpy as np import pickle from multiprocessing import Pool from typing import Optional from tqdm import tqdm from dynapipe.data_opt.cost_models import ProfileBasedCostModelWithRC from dynapipe.data_opt.optimizer import DataAssignmentOptimizer from dynapipe.model import TransformerModelSpec
7,493
"-c", "--cost-model", type=str, required=True, help="Path to a cost model file, needed for dynamic " " batching.", ) parser.add_argument( "-m", "--model", type=str, required=True, choices=["gpt", "t5"], help="Model to use.", ) parser.add_argument( "-g", "--global-batch-size", type=int, default=65536, help="Global batch size.", ) parser.add_argument( "-o", "--output", type=str, default="compare_batching_methods.jsonl", help="Output file.", ) parser.add_argument( "-ml", "--mem-limit", type=float, default=float("inf"), help="Memory limit for the data assignment optimizer.", ) parser.add_argument( "-ppr", "--pp-degree-range", type=str, default="1", help="Range of pipeline stages to simulate.", ) parser.add_argument( "-tpd", "--tp-degree", type=int, default=1, help="TP degree to simulate.", ) parser.add_argument( "-p", "--num-processes", type=int, default=64, help="Number of processes to use.", ) args = parser.parse_args() args.max_seqlen_range = [int(x) for x in args.max_seqlen_range.split(",")] args.pp_degree_range = [int(x) for x in args.pp_degree_range.split(",")] return args def get_powers_of_2_up_to(n): return [2**i for i in range(math.floor(math.log2(n)) + 1)] def get_candidate_mbs(maxn=512): return get_powers_of_2_up_to(maxn) def get_candidate_tokens(maxn=65536): return [x for x in get_powers_of_2_up_to(maxn) if x >= 32] def get_sequence_lengths(dataset_path, max_seqlen): """Get the sequence lengths from a Megatron-LM processed dataset.""" with open(dataset_path, "rb") as f: dataset = np.load(f) # dataset contains 3 columns: [start_id, end_id, sequence_length] # we only need the sequence length return np.clip(dataset[:, 2], 1, max_seqlen).astype(np.int32)[:100000] def get_global_batches(input_seqlens, target_seqlens, gbs=65536): """Get the number of global batches for a given global batch size.""" global_batches = [] current_batch = [] current_batch_size = 0 for input_seqlen, target_seqlen in zip(input_seqlens, target_seqlens): if current_batch_size + input_seqlen + target_seqlen > gbs: global_batches.append(current_batch.copy()) current_batch = [] current_batch_size = 0 current_batch.append((input_seqlen, target_seqlen)) current_batch_size += input_seqlen + target_seqlen if current_batch: global_batches.append(current_batch.copy()) return global_batches def get_model_spec(pp_degree, model="gpt"): if model == "gpt": return TransformerModelSpec(4 * pp_degree, 0, 4096, 32, 16384, 128) elif model == "t5": return TransformerModelSpec( 2 * pp_degree, 2 * pp_degree, 1024, 128, 65536, 128 ) else: raise ValueError("Unsupported model: {}".format(model)) def get_dataopt( pp_degree, cost_model, model="gpt", memlimit=float("inf"), tp_degree=1 ): num_stages = pp_degree model_spec = get_model_spec(pp_degree, model) zero_stage = 0 n_layers_per_stage = 4 dp_size = 1
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 def parse_args(): parser = argparse.ArgumentParser("Compare batching methods") parser.add_argument( "-t", "--method", type=str, choices=["none", "packing", "dynamic", "fixed_mbs", "fixed_tokens"], required=True, help="Micro-batching method to use.", ) parser.add_argument( "-s", "--max-seqlen-range", type=str, default="2048", help="Range of maximum sequence length to simulate. " "Format as comma separated list of integers.", ) parser.add_argument( "-di", "--input-dataset", type=str, required=True, help="Path to a Megatron-LM processed indexfile, " "which records the sequence length of samples in npy " "format. For input sequences.", ) parser.add_argument( "-dt", "--target-dataset", type=str, required=True, help="Dataset path for target sequences.", ) parser.add_argument( "-c", "--cost-model", type=str, required=True, help="Path to a cost model file, needed for dynamic " " batching.", ) parser.add_argument( "-m", "--model", type=str, required=True, choices=["gpt", "t5"], help="Model to use.", ) parser.add_argument( "-g", "--global-batch-size", type=int, default=65536, help="Global batch size.", ) parser.add_argument( "-o", "--output", type=str, default="compare_batching_methods.jsonl", help="Output file.", ) parser.add_argument( "-ml", "--mem-limit", type=float, default=float("inf"), help="Memory limit for the data assignment optimizer.", ) parser.add_argument( "-ppr", "--pp-degree-range", type=str, default="1", help="Range of pipeline stages to simulate.", ) parser.add_argument( "-tpd", "--tp-degree", type=int, default=1, help="TP degree to simulate.", ) parser.add_argument( "-p", "--num-processes", type=int, default=64, help="Number of processes to use.", ) args = parser.parse_args() args.max_seqlen_range = [int(x) for x in args.max_seqlen_range.split(",")] args.pp_degree_range = [int(x) for x in args.pp_degree_range.split(",")] return args def get_powers_of_2_up_to(n): return [2**i for i in range(math.floor(math.log2(n)) + 1)] def get_candidate_mbs(maxn=512): return get_powers_of_2_up_to(maxn) def get_candidate_tokens(maxn=65536): return [x for x in get_powers_of_2_up_to(maxn) if x >= 32] def get_sequence_lengths(dataset_path, max_seqlen): """Get the sequence lengths from a Megatron-LM processed dataset.""" with open(dataset_path, "rb") as f: dataset = np.load(f) # dataset contains 3 columns: [start_id, end_id, sequence_length] # we only need the sequence length return np.clip(dataset[:, 2], 1, max_seqlen).astype(np.int32)[:100000] def get_global_batches(input_seqlens, target_seqlens, gbs=65536): """Get the number of global batches for a given global batch size.""" global_batches = [] current_batch = [] current_batch_size = 0 for input_seqlen, target_seqlen in zip(input_seqlens, target_seqlens): if current_batch_size + input_seqlen + target_seqlen > gbs: global_batches.append(current_batch.copy()) current_batch = [] current_batch_size = 0 current_batch.append((input_seqlen, target_seqlen)) current_batch_size += input_seqlen + target_seqlen if current_batch: global_batches.append(current_batch.copy()) return global_batches def get_model_spec(pp_degree, model="gpt"): if model == "gpt": return TransformerModelSpec(4 * pp_degree, 0, 4096, 32, 16384, 128) elif model == "t5": return TransformerModelSpec( 2 * pp_degree, 2 * pp_degree, 1024, 128, 65536, 128 ) else: raise ValueError("Unsupported model: {}".format(model)) def get_dataopt( pp_degree, cost_model, model="gpt", 
memlimit=float("inf"), tp_degree=1 ): num_stages = pp_degree model_spec = get_model_spec(pp_degree, model) zero_stage = 0 n_layers_per_stage = 4 dp_size = 1
dataopt = DataAssignmentOptimizer(
1
2023-11-08 07:58:20+00:00
12k
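The context of the record above documents TransformerModelSpec, whose __post_init__ derives mlp_hidden_dim (4x hidden_dim) and kv_channels (hidden_dim // num_attn_heads) when they are omitted, and which serializes to eight little-endian 4-byte integers. A short sketch of that behaviour follows, assuming the dynapipe package from the repository is importable; the concrete dimensions are illustrative and mirror the GPT configuration built by get_model_spec in the script.

from dynapipe.model import TransformerModelSpec

# Only the required fields are given; __post_init__ fills in the rest.
spec = TransformerModelSpec(4, 0, 4096, 32)  # enc layers, dec layers, hidden dim, attn heads
assert spec.mlp_hidden_dim == 4 * 4096   # defaults to 4x hidden_dim
assert spec.kv_channels == 4096 // 32    # defaults to hidden_dim // num_attn_heads

# serialize() packs the eight integer fields into 32 bytes; deserialize() restores them.
blob = spec.serialize()
assert len(blob) == 32
restored = TransformerModelSpec.deserialize(blob)
assert restored.hidden_dim == spec.hidden_dim
assert restored.kv_channels == spec.kv_channels

The DataAssignmentOptimizer documented in the same context then consumes such a spec together with a ProfileBasedCostModelWithRC when splitting a mini-batch into micro-batches.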
apple/ml-reed
reed/data/environment_observation_dataset.py
[ { "identifier": "TrajectoryReplayBuffer", "path": "BPref/replay_buffer.py", "snippet": "class TrajectoryReplayBuffer:\n \"\"\"\n Buffer to store trajectories of environment transitions. Unlike ReplayBuffer, which stores all transitions in a\n flat manner, transitions are sorted by trajectory. Each trajectory corresponds to an episode.\n \"\"\"\n _RELABEL_BATCH_SIZE = 256\n\n def __init__(self, capacity: int, device: torch.device, window: int = 1, num_envs: t.Optional[int] = None,\n image_observations: t.Optional[t.Union[int, np.ndarray]] = None):\n \"\"\"\n Args:\n capacity: the number of trajectories to hold in memory\n device: the device sampled transitions should be put on\n window: no idea - part of the original code and is used in add_batch(...) which has not yet been refactored\n num_envs: the number of environment instances used to train the policy. Only needs to be specified when the\n number is >1. Some algorithms train on multiple instances of an environment at once, e.g. PPO.\n Not currently used, but not yet removed because we have not tested with an algorithm that needs\n multiple environment instances.\n image_observations: (default = false) whether to collect image observations in addition to state\n observations. This is helpful to use when the policy is trained on the state, but you\n want to visualize the trajectories or the reward model is trained on images.\n\n \"\"\"\n self.capacity = capacity\n self.device = device\n\n self.observations: t.Optional[np.ndarray] = None\n self.actions: t.Optional[np.ndarray] = None\n self.rewards: t.Optional[np.ndarray] = None\n self.not_dones: t.Optional[np.ndarray] = None\n self.not_dones_no_max: t.Optional[np.ndarray] = None\n self.trajectory_lengths: t.List = []\n self.window = window\n self.env_rewards: t.Optional[np.ndarray] = None\n self.image_observations: t.Optional[np.ndarray] = None\n # track whether to collect image observations - when not None, specifies the dimensions of the images\n self._collect_image_observations = image_observations\n\n # track the trajectories as a list of Trajectory\n self.trajectories: t.List[Trajectory] = []\n\n self.idx = 0\n self.last_save = 0\n self.full = False\n\n def __len__(self):\n return np.sum(self.trajectory_lengths) - len(self.trajectory_lengths)\n\n def __getitem__(self, flat_indx: t.Union[int, t.Tuple[int, int], t.List[int]]) -> TRANSITION:\n \"\"\"\n Get the transition at the given index\n\n Args:\n flat_indx: the index assuming transitions are stored flat instead of nested in trajectories\n - when an integer is specified, a single transition is retrieved\n - when a tuple of integers is given, a slice is retrieved as if the transitions are stored flat\n\n Returns:\n current observation\n action\n reward\n next observation\n whether the episode ended\n whether the episode ended without reaching max steps\n image version of current observation (optional)\n \"\"\"\n if isinstance(flat_indx, int) or isinstance(flat_indx, np.int64):\n traj_indx, trans_indx = self._flat_indx_to_trajectory_index(flat_indx)\n # check we are grabbing from a trajectory currently being accumulated\n # When the done signal is given, the current trajectory being accumulated is converted to a trajectory,\n # is added to the list of trajectories, and the values used to accumulate the next trajectory are set to\n # done. The next trajectory is not started until the call to add(...) 
after the done signal is received.\n # Therefore, we need to check whether the trajectory to pull from is actually the last completed trajectory\n # prior to starting a new trajectory. This is why we compare the length of the lists containing trajectory\n # lengths and the list containing the trajectories.\n if (traj_indx == len(self.trajectory_lengths) - 1\n and len(self.trajectory_lengths) > len(self.trajectories)):\n # we need to grab from the trajectory currently being populated\n return (self.observations[trans_indx].astype(np.float32), self.actions[trans_indx].astype(np.float32),\n self.rewards[trans_indx].astype(np.float32), self.observations[trans_indx + 1].astype(np.float32),\n self.not_dones[trans_indx].astype(np.float32),\n self.not_dones_no_max[trans_indx].astype(np.float32),\n (self.env_rewards[trans_indx].astype(np.float32)\n if self.env_rewards is not None\n else None),\n ((self.image_observations[trans_indx].astype(np.float32))\n if self.image_observations is not None\n else None),\n ((self.image_observations[trans_indx+1].astype(np.float32))\n if self.image_observations is not None\n else None))\n else:\n # grab from a previously completed trajectory\n transition: Transition = self.trajectories[traj_indx][trans_indx]\n return (transition.observation.astype(np.float32), transition.action.astype(np.float32),\n transition.reward.astype(np.float32), transition.next_observation.astype(np.float32),\n transition.not_done.astype(np.float32), transition.not_done_no_max.astype(np.float32),\n transition.env_reward.astype(np.float32),\n (transition.image_observation.astype(np.float32)\n if transition.image_observation is not None\n else None),\n (transition.next_image_observation.astype(np.float32)\n if transition.next_image_observation is not None\n else None))\n elif isinstance(flat_indx, t.List):\n observations = []\n actions = []\n rewards = []\n next_observations = []\n not_dones = []\n not_dones_no_max = []\n env_rewards = []\n image_observations = []\n next_image_observations = []\n for indx in flat_indx:\n observation, action, reward, next_observation, not_done, not_done_no_max, env_reward, image_observation, next_image_observation = self[indx]\n observations.append(observation)\n actions.append(action)\n rewards.append(reward)\n next_observations.append(next_observation)\n not_dones.append(not_done)\n not_dones_no_max.append(not_done_no_max)\n if env_reward is not None:\n env_rewards.append(env_reward)\n if image_observation is not None:\n image_observations.append(image_observation)\n if next_image_observation is not None:\n next_image_observations.append(next_image_observation)\n return (np.asarray(observations, dtype=np.float32), np.asarray(actions, dtype=np.float32),\n np.asarray(rewards, dtype=np.float32), np.asarray(next_observations, dtype=np.float32),\n np.asarray(not_dones, dtype=np.float32), np.asarray(not_dones_no_max, dtype=np.float32),\n (np.asarray(env_rewards, dtype=np.float32) if len(env_rewards) > 0 else None),\n (np.asarray(image_observations, dtype=np.float32) if self._collect_image_observations else None),\n (np.asarray(next_image_observations, dtype=np.float32) if self._collect_image_observations else None))\n else:\n # get the locations of the start and end transitions\n start_traj_indx, start_trans_indx = self._flat_indx_to_trajectory_index(flat_indx[0])\n end_traj_indx, end_trans_indx = self._flat_indx_to_trajectory_index(flat_indx[1])\n # check that we are not spanning trajectories\n if start_traj_indx == end_traj_indx:\n # grab the sub-trajectory\n 
sub_trajectory = self.trajectories[start_traj_indx][tuple((start_trans_indx, end_trans_indx))]\n else:\n # grab what remains of the trajectory\n end_trans_indx = len(self.trajectories[start_traj_indx]) - 1\n sub_trajectory = self.trajectories[start_traj_indx][tuple((start_trans_indx, end_trans_indx))]\n return (sub_trajectory.initial_observations,\n sub_trajectory.actions,\n sub_trajectory.rewards,\n sub_trajectory.next_observations,\n sub_trajectory.not_dones,\n sub_trajectory.not_dones_no_max,\n sub_trajectory.env_rewards,\n (sub_trajectory.initial_image_observations\n if sub_trajectory.initial_image_observations is not None\n else None),\n (sub_trajectory.next_image_observations\n if sub_trajectory.next_image_observations is not None\n else None))\n\n @property\n def trajectory_count(self) -> int:\n \"\"\"\n The number of trajectories in the buffer\n \"\"\"\n return len(self.trajectories)\n\n @property\n def all_not_dones(self) -> np.ndarray:\n \"\"\"\n Rewards from the state-action pairs from all trajectories and all transitions, where the action was taken in the state\n \"\"\"\n return np.concatenate([np.expand_dims(traj.not_dones, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_rewards(self) -> np.ndarray:\n \"\"\"\n Rewards from the state-action pairs from all trajectories and all transitions, where the action was taken in the state\n \"\"\"\n return np.concatenate([np.expand_dims(traj.rewards, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_environment_rewards(self) -> np.ndarray:\n \"\"\"\n Environment rewards from all trajectories and all transitions\n \"\"\"\n return np.concatenate([np.expand_dims(traj.rewards, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_initial_image_observations(self) -> np.ndarray:\n \"\"\"\n Image observations from the state-action pairs from all trajectories and all transitions, where the action was taken in the state\n \"\"\"\n return np.concatenate([np.expand_dims(traj.initial_image_observations, axis=0)\n for traj in self.trajectories],\n axis=0)\n\n @property\n def all_next_image_observations(self) -> np.ndarray:\n \"\"\"\n Image observations from the state-action pairs from all trajectories and all transitions,\n\n The result of a transition\n \"\"\"\n return np.concatenate([np.expand_dims(traj.next_image_observations, axis=0)\n for traj in self.trajectories],\n axis=0)\n\n @property\n def all_initial_observations(self) -> np.ndarray:\n \"\"\"\n observations from the state-action pairs from all trajectories and all transitions, where the action was taken in the state\n \"\"\"\n return np.concatenate([np.expand_dims(traj.initial_observations, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_next_observations(self) -> np.ndarray:\n \"\"\"\n Observations from the state-action pairs from all trajectories and all transitions\n\n The result of a transition\n \"\"\"\n return np.concatenate([np.expand_dims(traj.next_observations, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_actions(self) -> np.ndarray:\n \"\"\"\n Actions from the state-action pairs from all trajectories and all transitions\n \"\"\"\n return np.concatenate([np.expand_dims(traj.actions, axis=0) for traj in self.trajectories], axis=0)\n\n def _flat_indx_to_trajectory_index(self, flat_indx: int) -> t.Tuple[int, int]:\n \"\"\"\n Converts an index that assumes the transitions are flat to a trajectory and transition (w/in trajectory) index\n\n Args:\n flat_indx: the index 
assuming transitions are stored flat\n\n Returns:\n the index of the trajectory containing the transition\n the index of the transition within the trajectory\n \"\"\"\n # need to figure out which transition indices are stored in which trajectories\n transition_cumulative_sum = np.cumsum(self.trajectory_lengths)\n # the trajectory containing the transition is at the first index where the cumulative sum of transitions is\n # less than the transition index\n target_trajectory_indx = int(np.argmax(flat_indx < transition_cumulative_sum))\n # get the transition's index within the trajectory as the different between the flat index and the cumulative\n # sum at the previous trajectory - tells us how far into the target trajectory the transition is\n if target_trajectory_indx == 0:\n transition_trajectory_indx = flat_indx\n else:\n transition_trajectory_indx = flat_indx - transition_cumulative_sum[target_trajectory_indx - 1]\n return target_trajectory_indx, transition_trajectory_indx\n\n def _add_transition(self, observation: np.ndarray, action: np.ndarray, reward: float, done: t.Union[float, bool],\n done_no_max: t.Union[float, bool],\n env_reward: t.Optional[float] = None, image_observations: t.Optional[np.ndarray] = None):\n \"\"\"\n Track the transition and update the length of the trajectory currently being accumulated\n\n Args:\n observation: the current observation\n action: the action taken in the current state\n reward: the reward associated with the last state-action pait\n done: whether the last action completed an episode\n done_no_max: whether the last action completed an episode without reaching the maximum allowed steps\n env_reward: (optional) the reward given by the environment - stored and used to train the preference-learned\n reward model when learning from synthetic feedback\n image_observations: (optional) image-based observation -> should not be given is observations is also an image. 
This\n should be used when you want to accumulate images separately from policy training.\n \"\"\"\n self.observations = np.concatenate([self.observations, np.expand_dims(observation, axis=0)], axis=0)\n self.actions = np.concatenate([self.actions, np.expand_dims(action, axis=0)], axis=0)\n self.rewards = np.concatenate([self.rewards, np.asarray(reward).reshape(1, 1)], axis=0)\n if type(done) is float:\n self.not_dones = np.concatenate([self.not_dones,\n np.asarray(not done, dtype=np.float32).reshape(1, 1)], axis=0)\n self.not_dones_no_max = np.concatenate([self.not_dones_no_max,\n np.asarray(not done_no_max, dtype=np.float32).reshape(1, 1)],\n axis=0)\n else:\n self.not_dones = np.concatenate([self.not_dones,\n np.asarray(~done, dtype=np.float32).reshape(1, 1)], axis=0)\n self.not_dones_no_max = np.concatenate([self.not_dones_no_max,\n np.asarray(~done_no_max, dtype=np.float32).reshape(1, 1)],\n axis=0)\n\n self.trajectory_lengths[-1] += 1\n if env_reward is not None:\n self.env_rewards = np.concatenate([self.env_rewards,\n np.asarray(env_reward, dtype=np.float32).reshape(1, 1)], axis=0)\n\n if image_observations is not None and self._collect_image_observations:\n self.image_observations = np.concatenate([self.image_observations, np.expand_dims(image_observations, axis=0)], axis=0)\n\n def _start_trajectory(self, observation: np.ndarray,\n action: np.ndarray,\n reward: float,\n done: t.Union[float, bool],\n done_no_max: t.Union[float, bool],\n env_reward: t.Optional[float] = None,\n image_observations: t.Optional[np.ndarray] = None):\n \"\"\"\n Start a new trajectory and track the transition\n\n Args:\n observation: the current observation\n action: the action taken in the current state\n reward: the reward associated with the last state-action pait\n done: whether the last action completed an episode\n done_no_max: whether the last action completed an episode without reaching the maximum allowed steps\n env_reward: (optional) the reward given by the environment - stored and used to train the preference-learned\n reward model when learning from synthetic feedback\n image_observations: (optional) image-based observation -> should not be given is observations is also an image. 
This\n should be used when you want to accumulate images separately from policy training.\n \"\"\"\n self.observations = np.expand_dims(observation, axis=0).astype(dtype=np.float32)\n self.actions = np.expand_dims(action, axis=0).astype(dtype=np.float32)\n self.rewards = np.asarray(reward, dtype=np.float32).reshape(1, 1)\n if type(done) is float:\n self.not_dones = np.asarray(not done, dtype=np.float32).reshape(1, 1)\n self.not_dones_no_max = np.asarray(not done_no_max, dtype=np.float32).reshape(1, 1)\n else:\n self.not_dones = np.asarray(~done, dtype=np.float32).reshape(1, 1)\n self.not_dones_no_max = np.asarray(~done_no_max, dtype=np.float32).reshape(1, 1)\n\n self.trajectory_lengths.append(1)\n\n if env_reward is not None:\n self.env_rewards = np.asarray(env_reward, dtype=np.float32).reshape(1, 1)\n\n if image_observations is not None and self._collect_image_observations:\n self.image_observations = np.expand_dims(image_observations, axis=0).astype(dtype=np.float32)\n\n def add(self, observation, action, reward, next_observation, done, done_no_max,\n env_reward: t.Optional[float] = None, image_observation: t.Optional[np.ndarray] = None,\n image_next_observation: t.Optional[np.ndarray] = None):\n \"\"\"\n Args:\n observation: the current observation\n action: the action taken in the current state\n reward: the reward associated with the last state-action pait\n next_observation: only used when an episode is completed to ensure the last observation is captured\n done: whether the last action completed an episode\n done_no_max: whether the last action completed an episode without reaching the maximum allowed steps\n env_reward: (optional) the reward given by the environment - stored and used to train the preference-learned\n reward model when learning from synthetic feedback\n image_observation: (optional) image-based observation -> should not be given is observations is also an image. This\n should be used when you want to accumulate images separately from policy training.\n image_next_observation: (optional) the image-based next observation -> should not be given when next_observation is also\n and image. 
This should be used when you want to accumulate the images separately from the\n trained policy.\n \"\"\"\n if self.observations is None:\n self._start_trajectory(observation, action, reward, done, done_no_max, env_reward, image_observation)\n elif done:\n self._add_transition(observation, action, reward, done, done_no_max, env_reward, image_observation)\n # the episode has ended, so we need to track the next observation\n self.observations = np.concatenate([self.observations, np.expand_dims(next_observation, axis=0)], axis=0)\n if image_next_observation is not None:\n self.image_observations = np.concatenate([self.image_observations,\n np.expand_dims(image_next_observation, axis=0)], axis=0)\n # create the trajectory\n self.trajectories.append(Trajectory(self.observations.astype(dtype=np.float32),\n (self.image_observations.astype(dtype=np.float32)\n if self.image_observations is not None\n else None),\n actions=self.actions.astype(dtype=np.float32),\n rewards=self.rewards.astype(dtype=np.float32),\n not_dones=self.not_dones.astype(dtype=np.float32),\n not_dones_no_max=self.not_dones_no_max.astype(dtype=np.float32),\n env_rewards=self.env_rewards.astype(dtype=np.float32)))\n # check if the inclusion of the just completed trajectory puts the buffer at capacity\n # if it does, remove the first trajectory as this is a FIFO buffer\n if np.sum(self.trajectory_lengths) >= self.capacity:\n self.trajectories = self.trajectories[1:]\n self.trajectory_lengths = self.trajectory_lengths[1:]\n self.observations = None\n self.actions = None\n self.rewards = None\n self.not_dones = None\n self.not_dones_no_max = None\n self.env_rewards = None\n self.image_observations = None\n else:\n self._add_transition(observation, action, reward, done, done_no_max, env_reward, image_observation)\n\n self.idx = (self.idx + 1) % self.capacity\n self.full = self.full or self.idx == 0\n\n def relabel_with_predictor(self, predictor, state_action_formatter: PreProcessInference):\n \"\"\"\n Relabel the rewards stored in the replay buffer using the given predictor\n\n Args:\n predictor: network that will consume state-action pairs and assign a reward\n state_action_formatter: formats the states and actions for consumption by the reward model\n \"\"\"\n print(\"Relabelling the replay buffer with the updated reward model.\")\n for trajectory in self.trajectories:\n # the number of batches to run through the model\n total_iter = int(len(trajectory) / self._RELABEL_BATCH_SIZE)\n # handle the case where we have more transitions than is evenly divisible by the batch size\n if len(trajectory) > self._RELABEL_BATCH_SIZE * total_iter:\n total_iter += 1\n # collect and process each batch to be passed through predictor\n for index in range(total_iter):\n start_indx = index * self._RELABEL_BATCH_SIZE\n # make sure we don't have an end index that is after the end of the trajectory\n end_indx = min((index + 1) * self._RELABEL_BATCH_SIZE, len(trajectory))\n\n # pull out the actions from the transitions that will be relabelled\n actions = trajectory.actions[start_indx:end_indx]\n # we need to handle the case where the reward model operates off of images\n if predictor.image_observations:\n observations = trajectory.all_image_observations[start_indx:end_indx]\n else:\n observations = trajectory.all_observations[start_indx:end_indx]\n formatted_state_action = state_action_formatter.format_state_action(observations, actions, batch_sa=True)\n pred_reward = predictor.r_hat_batch(formatted_state_action)\n # update the rewards assigned to the 
transitions\n trajectory.rewards[start_indx:end_indx] = pred_reward\n\n def sample(self, batch_size: int):\n indxs = list(np.random.randint(0, np.sum(self.trajectory_lengths) - 1, size=batch_size))\n observations, actions, rewards, next_observations, not_dones, not_dones_no_max, env_rewards, image_observations, next_image_observations = self[indxs]\n observations = torch.as_tensor(observations, device=self.device).float()\n actions = torch.as_tensor(actions, device=self.device)\n rewards = torch.as_tensor(rewards, device=self.device)\n next_observations = torch.as_tensor(next_observations, device=self.device).float()\n not_dones = torch.as_tensor(not_dones, device=self.device)\n not_dones_no_max = torch.as_tensor(not_dones_no_max, device=self.device)\n env_rewards = torch.as_tensor(env_rewards, device=self.device)\n image_observations = (torch.as_tensor(image_observations, device=self.device).float() if self._collect_image_observations else None)\n next_image_observations = (torch.as_tensor(next_image_observations, device=self.device).float() if self._collect_image_observations else None)\n return observations, actions, rewards, next_observations, not_dones, not_dones_no_max, env_rewards, image_observations, next_image_observations\n\n def sample_state_ent(self, batch_size: int):\n observations, actions, rewards, next_observations, not_dones, not_dones_no_max, _, _, _ = self.sample(batch_size)\n full_observation = torch.as_tensor(np.concatenate([traj.all_observations for traj in self.trajectories], axis=0),\n device=self.device)\n return observations, full_observation, actions, rewards, next_observations, not_dones, not_dones_no_max\n\n def save(self, out_directory: Path, env_id: str, step: int):\n \"\"\"\n Save the replay buffer to disk as a npz archive\n Args:\n out_directory: location where replay buffer will be saved\n env_id: the environment within which the data was generated\n step: the number of policy training steps taken to produce this dataset\n \"\"\"\n # create the ZipFile object\n zip_obj = ZipFile(out_directory / f\"{env_id}_replay_buffer_{step}.zip\", \"w\")\n\n # write each trajectory file to disk and to the zip archive\n for traj_id, trajectory in enumerate(self.trajectories):\n trajectory.save(out_directory / f\"{traj_id}.npz\")\n zip_obj.write(out_directory / f\"{traj_id}.npz\")\n # close the Zip File\n zip_obj.close()\n\n @staticmethod\n def from_directory(directory_path: Path,\n device: torch.device = 'cuda') -> \"TrajectoryReplayBuffer\":\n \"\"\"\n Create a TrajectoryReplay buffer from a directory of npz archive trajectories\n\n Args:\n directory_path: the location of the npz_archive on disk\n device: the device sampled transitions should be pushed to\n Returns:\n populated trajectory replay buffer\n \"\"\"\n # accumulate the trajectories\n trajectories = []\n trajectory_lengths = []\n # determine how many transitions are in the replay buffer\n capacity = 0\n # load each trajectory from disk\n for traj_filename in directory_path.iterdir():\n # we only load data from npz archives, so we need to skip anything else\n if not traj_filename.suffix == \".npz\": continue\n # load the trajectory from disk\n traj = Trajectory.from_npz(traj_filename)\n # track the trajectory\n trajectories.append(traj)\n # track the trajectory's length\n trajectory_lengths.append(len(traj))\n # track the trajectory's length\n capacity += len(traj)\n # create the buffer\n _buffer = TrajectoryReplayBuffer(capacity=capacity, device=device)\n # add the trajectories to the buffer\n 
_buffer.trajectories = trajectories\n _buffer.trajectory_lengths = trajectory_lengths\n\n return _buffer" }, { "identifier": "EnvironmentContrastiveDatapoint", "path": "reed/data/environment_transition_dataset.py", "snippet": "class EnvironmentContrastiveDatapoint:\n \"\"\"\n A triplet where two states/observations are given and one is an augmented version of the other.\n\n The augmentation may be along the lines of random crop, jitter, etc or may be a temporal augmentation where the\n augmented state occurs in the future\n \"\"\"\n state = attr.ib(type=torch.Tensor)\n action = attr.ib(type=torch.Tensor)\n augmented_state = attr.ib(type=torch.Tensor)" }, { "identifier": "EnvironmentTransitionDataset", "path": "reed/data/environment_transition_dataset.py", "snippet": "class EnvironmentTransitionDataset(Dataset):\n \"\"\"\n A dataset of environment transitions where the state-action pairs are inputs\n and the next states are the target values.\n\n The dataset can be loaded from a file saved to disk or from a Replay Buffer.\n\n States and next states can be images.\n \"\"\"\n def __init__(self, replay_buffer: t.Optional[TrajectoryReplayBuffer] = None,\n file_path: t.Optional[Path] = None,\n target: str = \"next_observation\",\n device: str = \"cuda\",\n multi_gpu: bool = False,\n image_observations: bool = False,\n image_formatter: t.Optional[t.Any] = None):\n \"\"\"\n Either the replay_buffer or the file_path needs to not be of type None. If neither are of type\n None then both are used to populate the dataset\n\n Args:\n replay_buffer: the buffer of collected environment transitions\n file_path: the location of the datasets file\n target: (default = next_observation) the target for the SFC objective. Must be one of next_observation\n (the target is the next observation) or observation_difference (the target is the difference between\n the current and next observation).\n device: (default = cuda) whether to run on the cpu or a cuda device\n multi_gpu: (default = False) whether the model is trained across multiple GPUs in which case we do not\n push the data to a device before returning it\n image_observations: (default = False) whether or not the states are to be tracked as images\n image_formatter: (default = None) a function to apply to the raw images in order to format them them for\n training\n TODO: define expected format for this file will probably do this\n once get to the point of writing out a dataset file.\n \"\"\"\n assert replay_buffer is not None or file_path is not None, (\"One of replay_buffer or file_path must be \"\n \"specified. 
Both are None.\")\n super(EnvironmentTransitionDataset, self).__init__()\n\n assert target in {\"next_observation\", \"observation_difference\"}, (f\"target must be one of 'next_observation' or\"\n f\" 'observation_difference', not {target}.\")\n self._target = target\n\n self.states: t.Optional[np.ndarray] = None\n self.actions: t.Optional[np.ndarray] = None\n self.next_states: t.Optional[np.ndarray] = None\n\n # track whether we are using image and the image formatter\n self._image_observations = image_observations\n self._image_formatter = image_formatter\n\n if replay_buffer is not None:\n # track the replay buffer\n self._replay_buffer = replay_buffer\n elif file_path is not None:\n print(\"Implement the load transitions dataset from disk method\")\n import sys; sys.exit()\n else:\n raise NotImplementedError(\"You must specify either a replay buffer or file to load data from.\")\n\n # get the length of each trajectory\n self.trajectory_lengths = [len(traj) for traj in replay_buffer.trajectories]\n\n self._device = device\n self._multi_gpu = multi_gpu\n\n def __len__(self) -> int:\n return len(self._replay_buffer)\n\n def __getitem__(self, indx: int) -> EnvironmentContrastiveDatapoint:\n \"\"\"\n Return the specified sample from the dataset\n Args:\n indx: the index of inputs-target pair to be returned\n\n Returns:\n the environment transition inputs and the target bundled into a single\n datapoint object\n \"\"\"\n # grab the transition at the given index from the replay buffer\n obs, action, _, next_obs, _, _, _, image_observation, next_image_observation = self._replay_buffer[indx]\n # check if our states are images or not\n if self._image_observations:\n state = image_observation\n target = next_image_observation\n if self._image_formatter is not None:\n # when not processing a batch of data, the image formatter adds a dimension at index 0\n # to create a batch of size 1. 
This does not work with our use of torch.stack() in the\n # collate method\n state = self._image_formatter(state).squeeze(0)\n target = self._image_formatter(target).squeeze(0)\n else:\n state = obs\n target = next_obs\n\n if self._target == \"observation_difference\":\n target = np.abs(np.subtract(target, state))\n\n # convert the numpy arrays to tensors\n states = torch.as_tensor(state)\n target = torch.as_tensor(target)\n actions = torch.as_tensor(action)\n\n return EnvironmentContrastiveDatapoint(state=states.float().to(self._device),\n action=actions.float().to(self._device),\n augmented_state=target.float().to(self._device))\n\n @property\n def observation_shape(self) -> t.Union[int, t.Sequence[int]]:\n if self._image_observations:\n sample_observation = self._replay_buffer.trajectories[0].initial_image_observations\n if self._image_formatter is not None:\n sample_observation = self._image_formatter(sample_observation, batch_states=True)\n else:\n sample_observation = self._replay_buffer.trajectories[0].initial_observations\n return sample_observation.shape[1:]\n\n @property\n def action_shape(self) -> t.Union[int, t.Sequence[int]]:\n sample_action = self._replay_buffer.trajectories[0].actions\n if len(sample_action.shape) == 2:\n return sample_action.shape[-1]\n else:\n # grab dimension sizes after the first and second dimensions to account for the dimensions holding the\n # trajectories and transitions\n return sample_action.shape[1:]\n\n def _flat_indx_to_trajectory_index(self, flat_indx: int) -> t.Tuple[int, int]:\n \"\"\"\n Converts an index that assumes the transitions are flat to a trajectory and transition (w/in trajectory) index\n\n Args:\n flat_indx: the index assuming transitions are stored flat\n\n Returns:\n the index of the trajectory containing the transition\n the index of the transition within the trajectory\n \"\"\"\n # need to figure out which transition indices are stored in which trajectories\n transition_cumulative_sum = np.cumsum(self.trajectory_lengths)\n # the trajectory containing the transition is at the first index where the cumulative sum of transitions is\n # less than the transition index\n target_trajectory_indx = int(np.argmax(flat_indx < transition_cumulative_sum))\n # get the transition's index within the trajectory as the different between the flat index and the cumulative\n # sum at the previous trajectory - tells us how far into the target trajectory the transition is\n if target_trajectory_indx == 0:\n transition_trajectory_indx = flat_indx\n else:\n transition_trajectory_indx = flat_indx - transition_cumulative_sum[target_trajectory_indx - 1]\n return target_trajectory_indx, transition_trajectory_indx\n\n @staticmethod\n def collate(batch: t.List[EnvironmentContrastiveDatapoint]) -> EnvironmentContrastiveBatch:\n \"\"\"\n Collate a batch of environment transitions into a batch of environment transitions\n Args:\n batch: a list of environment transition datasets\n\n Returns:\n a batch of environment transitions\n \"\"\"\n # used to accumulate the network inputs and targets\n states = []\n actions = []\n next_states = []\n\n # accumulate inputs and targets from each sample in the batch\n for sample in batch:\n states.append(sample.state)\n actions.append(sample.action)\n next_states.append(sample.augmented_state)\n\n # bundle the batch of inputs and the batch of targets into a single batch object\n # get item should already have put the tensor on the correct device\n return EnvironmentContrastiveBatch(states=torch.stack(states, dim=0),\n 
actions=torch.stack(actions, dim=0),\n augmented_states=torch.stack(next_states, dim=0))" }, { "identifier": "EnvironmentContrastiveBatch", "path": "reed/data/environment_transition_dataset.py", "snippet": "class EnvironmentContrastiveBatch:\n \"\"\"\n A batch of triplets where two states/observations are given and one is an augmented version of the other.\n\n The augmentation may be along the lines of random crop, jitter, etc or may be a temporal augmentation where the\n augmented state occurs in the future\n \"\"\"\n states = attr.ib(type=t.Union[torch.Tensor, PackedSequence])\n actions = attr.ib(type=t.Union[torch.Tensor, PackedSequence])\n augmented_states = attr.ib(type=t.Union[torch.Tensor, PackedSequence])\n\n def to_dict(self) -> t.Mapping[str, t.Union[torch.Tensor, PackedSequence]]:\n \"\"\"\n Return the attr as a dictionary\n \"\"\"\n return {\"states\": self.states,\n \"actions\": self.actions,\n \"augmented_states\": self.augmented_states}" } ]
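A minimal sketch of the ceil-style batching described in the relabel_with_predictor docstring of the context snippet above, where a trailing partial batch is still pushed through the reward model; the standalone helper name batch_bounds and the generator form are assumptions for illustration.

# Illustrative sketch (not part of the dataset record): ceil-style batching as in
# TrajectoryReplayBuffer.relabel_with_predictor; the helper name is hypothetical.
def batch_bounds(n_transitions: int, batch_size: int):
    total_iter = n_transitions // batch_size
    # handle the case where the transitions are not evenly divisible by the batch size
    if n_transitions > batch_size * total_iter:
        total_iter += 1
    for index in range(total_iter):
        start_indx = index * batch_size
        # make sure the end index does not run past the end of the trajectory
        end_indx = min((index + 1) * batch_size, n_transitions)
        yield start_indx, end_indx

# e.g. list(batch_bounds(10, 4)) == [(0, 4), (4, 8), (8, 10)]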
import torch
import typing as t
from BPref.replay_buffer import TrajectoryReplayBuffer
from pathlib import Path
from reed.data.environment_transition_dataset import EnvironmentContrastiveDatapoint, EnvironmentTransitionDataset, \
    EnvironmentContrastiveBatch
from torchvision import transforms
from torchvision.transforms import ToTensor, Normalize, \
    Grayscale, RandomGrayscale, ColorJitter, RandomApply, RandomHorizontalFlip, GaussianBlur, RandomResizedCrop
8,807
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2023 Apple Inc. All Rights Reserved.
#

JITTER_FACTORS = {'brightness': 0.4, 'contrast': 0.4, 'saturation': 0.4, 'hue': 0.1}

DEFAULT_ARGS = {
    'normalization_mean': [0.485, 0.456, 0.406],
    'normalization_std': [0.229, 0.224, 0.225],
    'blur_sigma_min': 0.1,
    'blur_sigma_max': 2.0,
    'jitter_default': 0.,
    'strong_jitter_pval': 0.05,
    'strong_blur_pval': 0.01,
    'strong_crop_scale_min': 0.2,
    'strong_crop_scale_max': 0.7,
    'strong_crop_ratio_min': 1.2,
    'strong_crop_ratio_max': 1.8,
    'weak_jitter_pval': 0.1,
    'weak_blur_pval': 0.,
    'weak_crop_scale_min': 0.8,
    'weak_crop_scale_max': 1.0,
    'weak_crop_ratio_min': 1.6,
    'weak_crop_ratio_max': 1.8,
    'gaussian_blur_kernel_size': 5,
}
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2023 Apple Inc. All Rights Reserved.
#

JITTER_FACTORS = {'brightness': 0.4, 'contrast': 0.4, 'saturation': 0.4, 'hue': 0.1}

DEFAULT_ARGS = {
    'normalization_mean': [0.485, 0.456, 0.406],
    'normalization_std': [0.229, 0.224, 0.225],
    'blur_sigma_min': 0.1,
    'blur_sigma_max': 2.0,
    'jitter_default': 0.,
    'strong_jitter_pval': 0.05,
    'strong_blur_pval': 0.01,
    'strong_crop_scale_min': 0.2,
    'strong_crop_scale_max': 0.7,
    'strong_crop_ratio_min': 1.2,
    'strong_crop_ratio_max': 1.8,
    'weak_jitter_pval': 0.1,
    'weak_blur_pval': 0.,
    'weak_crop_scale_min': 0.8,
    'weak_crop_scale_max': 1.0,
    'weak_crop_ratio_min': 1.6,
    'weak_crop_ratio_max': 1.8,
    'gaussian_blur_kernel_size': 5,
}
class AugmentedEnvironmentObservationDataset(EnvironmentTransitionDataset):
2
2023-11-06 23:14:20+00:00
12k
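A minimal sketch of the cumulative-sum lookup that EnvironmentTransitionDataset._flat_indx_to_trajectory_index in the record above describes, mapping a flat transition index to a (trajectory, within-trajectory) pair; the standalone function name and the example trajectory lengths are assumptions for illustration.

# Illustrative sketch (not part of the dataset record): flat index -> trajectory index
# via a cumulative sum, mirroring _flat_indx_to_trajectory_index above.
import numpy as np

def flat_to_trajectory_index(flat_indx: int, trajectory_lengths: list) -> tuple:
    # cumulative number of transitions at the end of each trajectory
    cumulative = np.cumsum(trajectory_lengths)
    # the first trajectory whose cumulative count exceeds the flat index holds the transition
    traj_indx = int(np.argmax(flat_indx < cumulative))
    # the offset within that trajectory is the flat index minus everything stored before it
    within_indx = flat_indx if traj_indx == 0 else int(flat_indx - cumulative[traj_indx - 1])
    return traj_indx, within_indx

# e.g. with trajectories of length 3 and 5, flat index 4 lands at (trajectory 1, transition 1)
assert flat_to_trajectory_index(4, [3, 5]) == (1, 1)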
ApolloAuto/apollo-model-yolox
yolox/models/yolox.py
[ { "identifier": "YOLOXHead", "path": "yolox/models/yolo_head.py", "snippet": "class YOLOXHead(nn.Module):\n def __init__(\n self,\n num_classes,\n width=1.0,\n strides=[8, 16, 32],\n in_channels=[256, 512, 1024],\n act=\"silu\",\n depthwise=False,\n ):\n \"\"\"\n Args:\n act (str): activation type of conv. Defalut value: \"silu\".\n depthwise (bool): whether apply depthwise conv in conv branch. Defalut value: False.\n \"\"\"\n super().__init__()\n\n self.num_classes = num_classes\n self.decode_in_inference = True # for deploy, set to False\n\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n self.cls_preds = nn.ModuleList()\n self.reg_preds = nn.ModuleList()\n self.obj_preds = nn.ModuleList()\n self.stems = nn.ModuleList()\n Conv = DWConv if depthwise else BaseConv\n\n for i in range(len(in_channels)):\n self.stems.append(\n BaseConv(\n in_channels=int(in_channels[i] * width),\n out_channels=int(256 * width),\n ksize=1,\n stride=1,\n act=act,\n )\n )\n self.cls_convs.append(\n nn.Sequential(\n *[\n Conv(\n in_channels=int(256 * width),\n out_channels=int(256 * width),\n ksize=3,\n stride=1,\n act=act,\n ),\n Conv(\n in_channels=int(256 * width),\n out_channels=int(256 * width),\n ksize=3,\n stride=1,\n act=act,\n ),\n ]\n )\n )\n self.reg_convs.append(\n nn.Sequential(\n *[\n Conv(\n in_channels=int(256 * width),\n out_channels=int(256 * width),\n ksize=3,\n stride=1,\n act=act,\n ),\n Conv(\n in_channels=int(256 * width),\n out_channels=int(256 * width),\n ksize=3,\n stride=1,\n act=act,\n ),\n ]\n )\n )\n self.cls_preds.append(\n nn.Conv2d(\n in_channels=int(256 * width),\n out_channels=self.num_classes,\n kernel_size=1,\n stride=1,\n padding=0,\n )\n )\n self.reg_preds.append(\n nn.Conv2d(\n in_channels=int(256 * width),\n out_channels=4,\n kernel_size=1,\n stride=1,\n padding=0,\n )\n )\n self.obj_preds.append(\n nn.Conv2d(\n in_channels=int(256 * width),\n out_channels=1,\n kernel_size=1,\n stride=1,\n padding=0,\n )\n )\n\n self.use_l1 = False\n self.l1_loss = nn.L1Loss(reduction=\"none\")\n self.bcewithlog_loss = nn.BCEWithLogitsLoss(reduction=\"none\")\n self.iou_loss = IOUloss(reduction=\"none\")\n self.strides = strides\n self.grids = [torch.zeros(1)] * len(in_channels)\n\n def initialize_biases(self, prior_prob):\n for conv in self.cls_preds:\n b = conv.bias.view(1, -1)\n b.data.fill_(-math.log((1 - prior_prob) / prior_prob))\n conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n\n for conv in self.obj_preds:\n b = conv.bias.view(1, -1)\n b.data.fill_(-math.log((1 - prior_prob) / prior_prob))\n conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n\n def forward(self, xin, labels=None, imgs=None):\n outputs = []\n origin_preds = []\n x_shifts = []\n y_shifts = []\n expanded_strides = []\n\n for k, (cls_conv, reg_conv, stride_this_level, x) in enumerate(\n zip(self.cls_convs, self.reg_convs, self.strides, xin)\n ):\n x = self.stems[k](x)\n cls_x = x\n reg_x = x\n\n cls_feat = cls_conv(cls_x)\n cls_output = self.cls_preds[k](cls_feat)\n\n reg_feat = reg_conv(reg_x)\n reg_output = self.reg_preds[k](reg_feat)\n obj_output = self.obj_preds[k](reg_feat)\n\n if self.training:\n output = torch.cat([reg_output, obj_output, cls_output], 1)\n output, grid = self.get_output_and_grid(\n output, k, stride_this_level, xin[0].type()\n )\n x_shifts.append(grid[:, :, 0])\n y_shifts.append(grid[:, :, 1])\n expanded_strides.append(\n torch.zeros(1, grid.shape[1])\n .fill_(stride_this_level)\n .type_as(xin[0])\n )\n if self.use_l1:\n batch_size = 
reg_output.shape[0]\n hsize, wsize = reg_output.shape[-2:]\n reg_output = reg_output.view(\n batch_size, 1, 4, hsize, wsize\n )\n reg_output = reg_output.permute(0, 1, 3, 4, 2).reshape(\n batch_size, -1, 4\n )\n origin_preds.append(reg_output.clone())\n\n else:\n output = torch.cat(\n [reg_output, obj_output.sigmoid(), cls_output.sigmoid()], 1\n )\n\n outputs.append(output)\n\n if self.training:\n return self.get_losses(\n imgs,\n x_shifts,\n y_shifts,\n expanded_strides,\n labels,\n torch.cat(outputs, 1),\n origin_preds,\n dtype=xin[0].dtype,\n )\n else:\n self.hw = [x.shape[-2:] for x in outputs]\n # [batch, n_anchors_all, 85]\n outputs = torch.cat(\n [x.flatten(start_dim=2) for x in outputs], dim=2\n ).permute(0, 2, 1)\n if self.decode_in_inference:\n return self.decode_outputs(outputs, dtype=xin[0].type())\n else:\n return outputs\n\n def get_output_and_grid(self, output, k, stride, dtype):\n grid = self.grids[k]\n\n batch_size = output.shape[0]\n n_ch = 5 + self.num_classes\n hsize, wsize = output.shape[-2:]\n if grid.shape[2:4] != output.shape[2:4]:\n yv, xv = meshgrid([torch.arange(hsize), torch.arange(wsize)])\n grid = torch.stack((xv, yv), 2).view(1, 1, hsize, wsize, 2).type(dtype)\n self.grids[k] = grid\n\n output = output.view(batch_size, 1, n_ch, hsize, wsize)\n output = output.permute(0, 1, 3, 4, 2).reshape(\n batch_size, hsize * wsize, -1\n )\n grid = grid.view(1, -1, 2)\n output[..., :2] = (output[..., :2] + grid) * stride\n output[..., 2:4] = torch.exp(output[..., 2:4]) * stride\n return output, grid\n\n def decode_outputs(self, outputs, dtype):\n grids = []\n strides = []\n for (hsize, wsize), stride in zip(self.hw, self.strides):\n yv, xv = meshgrid([torch.arange(hsize), torch.arange(wsize)])\n grid = torch.stack((xv, yv), 2).view(1, -1, 2)\n grids.append(grid)\n shape = grid.shape[:2]\n strides.append(torch.full((*shape, 1), stride))\n\n grids = torch.cat(grids, dim=1).type(dtype)\n strides = torch.cat(strides, dim=1).type(dtype)\n\n outputs = torch.cat([\n (outputs[..., 0:2] + grids) * strides,\n torch.exp(outputs[..., 2:4]) * strides,\n outputs[..., 4:]\n ], dim=-1)\n return outputs\n\n def get_losses(\n self,\n imgs,\n x_shifts,\n y_shifts,\n expanded_strides,\n labels,\n outputs,\n origin_preds,\n dtype,\n ):\n bbox_preds = outputs[:, :, :4] # [batch, n_anchors_all, 4]\n obj_preds = outputs[:, :, 4:5] # [batch, n_anchors_all, 1]\n cls_preds = outputs[:, :, 5:] # [batch, n_anchors_all, n_cls]\n\n # calculate targets\n nlabel = (labels.sum(dim=2) > 0).sum(dim=1) # number of objects\n\n total_num_anchors = outputs.shape[1]\n x_shifts = torch.cat(x_shifts, 1) # [1, n_anchors_all]\n y_shifts = torch.cat(y_shifts, 1) # [1, n_anchors_all]\n expanded_strides = torch.cat(expanded_strides, 1)\n if self.use_l1:\n origin_preds = torch.cat(origin_preds, 1)\n\n cls_targets = []\n reg_targets = []\n l1_targets = []\n obj_targets = []\n fg_masks = []\n\n num_fg = 0.0\n num_gts = 0.0\n\n for batch_idx in range(outputs.shape[0]):\n num_gt = int(nlabel[batch_idx])\n num_gts += num_gt\n if num_gt == 0:\n cls_target = outputs.new_zeros((0, self.num_classes))\n reg_target = outputs.new_zeros((0, 4))\n l1_target = outputs.new_zeros((0, 4))\n obj_target = outputs.new_zeros((total_num_anchors, 1))\n fg_mask = outputs.new_zeros(total_num_anchors).bool()\n else:\n gt_bboxes_per_image = labels[batch_idx, :num_gt, 1:5]\n gt_classes = labels[batch_idx, :num_gt, 0]\n bboxes_preds_per_image = bbox_preds[batch_idx]\n\n try:\n (\n gt_matched_classes,\n fg_mask,\n pred_ious_this_matching,\n 
matched_gt_inds,\n num_fg_img,\n ) = self.get_assignments( # noqa\n batch_idx,\n num_gt,\n gt_bboxes_per_image,\n gt_classes,\n bboxes_preds_per_image,\n expanded_strides,\n x_shifts,\n y_shifts,\n cls_preds,\n obj_preds,\n )\n except RuntimeError as e:\n # TODO: the string might change, consider a better way\n if \"CUDA out of memory. \" not in str(e):\n raise # RuntimeError might not caused by CUDA OOM\n\n logger.error(\n \"OOM RuntimeError is raised due to the huge memory cost during label assignment. \\\n CPU mode is applied in this batch. If you want to avoid this issue, \\\n try to reduce the batch size or image size.\"\n )\n torch.cuda.empty_cache()\n (\n gt_matched_classes,\n fg_mask,\n pred_ious_this_matching,\n matched_gt_inds,\n num_fg_img,\n ) = self.get_assignments( # noqa\n batch_idx,\n num_gt,\n gt_bboxes_per_image,\n gt_classes,\n bboxes_preds_per_image,\n expanded_strides,\n x_shifts,\n y_shifts,\n cls_preds,\n obj_preds,\n \"cpu\",\n )\n\n torch.cuda.empty_cache()\n num_fg += num_fg_img\n\n cls_target = F.one_hot(\n gt_matched_classes.to(torch.int64), self.num_classes\n ) * pred_ious_this_matching.unsqueeze(-1)\n obj_target = fg_mask.unsqueeze(-1)\n reg_target = gt_bboxes_per_image[matched_gt_inds]\n if self.use_l1:\n l1_target = self.get_l1_target(\n outputs.new_zeros((num_fg_img, 4)),\n gt_bboxes_per_image[matched_gt_inds],\n expanded_strides[0][fg_mask],\n x_shifts=x_shifts[0][fg_mask],\n y_shifts=y_shifts[0][fg_mask],\n )\n\n cls_targets.append(cls_target)\n reg_targets.append(reg_target)\n obj_targets.append(obj_target.to(dtype))\n fg_masks.append(fg_mask)\n if self.use_l1:\n l1_targets.append(l1_target)\n\n cls_targets = torch.cat(cls_targets, 0)\n reg_targets = torch.cat(reg_targets, 0)\n obj_targets = torch.cat(obj_targets, 0)\n fg_masks = torch.cat(fg_masks, 0)\n if self.use_l1:\n l1_targets = torch.cat(l1_targets, 0)\n\n num_fg = max(num_fg, 1)\n loss_iou = (\n self.iou_loss(bbox_preds.view(-1, 4)[fg_masks], reg_targets)\n ).sum() / num_fg\n loss_obj = (\n self.bcewithlog_loss(obj_preds.view(-1, 1), obj_targets)\n ).sum() / num_fg\n loss_cls = (\n self.bcewithlog_loss(\n cls_preds.view(-1, self.num_classes)[fg_masks], cls_targets\n )\n ).sum() / num_fg\n if self.use_l1:\n loss_l1 = (\n self.l1_loss(origin_preds.view(-1, 4)[fg_masks], l1_targets)\n ).sum() / num_fg\n else:\n loss_l1 = 0.0\n\n reg_weight = 5.0\n loss = reg_weight * loss_iou + loss_obj + loss_cls + loss_l1\n\n return (\n loss,\n reg_weight * loss_iou,\n loss_obj,\n loss_cls,\n loss_l1,\n num_fg / max(num_gts, 1),\n )\n\n def get_l1_target(self, l1_target, gt, stride, x_shifts, y_shifts, eps=1e-8):\n l1_target[:, 0] = gt[:, 0] / stride - x_shifts\n l1_target[:, 1] = gt[:, 1] / stride - y_shifts\n l1_target[:, 2] = torch.log(gt[:, 2] / stride + eps)\n l1_target[:, 3] = torch.log(gt[:, 3] / stride + eps)\n return l1_target\n\n @torch.no_grad()\n def get_assignments(\n self,\n batch_idx,\n num_gt,\n gt_bboxes_per_image,\n gt_classes,\n bboxes_preds_per_image,\n expanded_strides,\n x_shifts,\n y_shifts,\n cls_preds,\n obj_preds,\n mode=\"gpu\",\n ):\n\n if mode == \"cpu\":\n print(\"-----------Using CPU for the Current Batch-------------\")\n gt_bboxes_per_image = gt_bboxes_per_image.cpu().float()\n bboxes_preds_per_image = bboxes_preds_per_image.cpu().float()\n gt_classes = gt_classes.cpu().float()\n expanded_strides = expanded_strides.cpu().float()\n x_shifts = x_shifts.cpu()\n y_shifts = y_shifts.cpu()\n\n fg_mask, geometry_relation = self.get_geometry_constraint(\n gt_bboxes_per_image,\n 
expanded_strides,\n x_shifts,\n y_shifts,\n )\n \n # NOTE: Fix `selected index k out of range`\n npa: int = fg_mask.sum().item() # number of positive anchors\n if npa == 0:\n gt_matched_classes = torch.zeros(0, device=fg_mask.device).long()\n pred_ious_this_matching = torch.rand(0, device=fg_mask.device)\n matched_gt_inds = gt_matched_classes\n num_fg = npa\n\n if mode == \"cpu\":\n gt_matched_classes = gt_matched_classes.cuda()\n fg_mask = fg_mask.cuda()\n pred_ious_this_matching = pred_ious_this_matching.cuda()\n matched_gt_inds = matched_gt_inds.cuda()\n num_fg = num_fg.cuda()\n\n return (\n gt_matched_classes,\n fg_mask,\n pred_ious_this_matching,\n matched_gt_inds,\n num_fg,\n )\n \n bboxes_preds_per_image = bboxes_preds_per_image[fg_mask]\n cls_preds_ = cls_preds[batch_idx][fg_mask]\n obj_preds_ = obj_preds[batch_idx][fg_mask]\n num_in_boxes_anchor = bboxes_preds_per_image.shape[0]\n\n if mode == \"cpu\":\n gt_bboxes_per_image = gt_bboxes_per_image.cpu()\n bboxes_preds_per_image = bboxes_preds_per_image.cpu()\n\n pair_wise_ious = bboxes_iou(gt_bboxes_per_image, bboxes_preds_per_image, False)\n\n gt_cls_per_image = (\n F.one_hot(gt_classes.to(torch.int64), self.num_classes)\n .float()\n )\n pair_wise_ious_loss = -torch.log(pair_wise_ious + 1e-8)\n\n if mode == \"cpu\":\n cls_preds_, obj_preds_ = cls_preds_.cpu(), obj_preds_.cpu()\n\n with torch.cuda.amp.autocast(enabled=False):\n cls_preds_ = (\n cls_preds_.float().sigmoid_() * obj_preds_.float().sigmoid_()\n ).sqrt()\n pair_wise_cls_loss = F.binary_cross_entropy(\n cls_preds_.unsqueeze(0).repeat(num_gt, 1, 1),\n gt_cls_per_image.unsqueeze(1).repeat(1, num_in_boxes_anchor, 1),\n reduction=\"none\"\n ).sum(-1)\n del cls_preds_\n\n cost = (\n pair_wise_cls_loss\n + 3.0 * pair_wise_ious_loss\n + float(1e6) * (~geometry_relation)\n )\n\n (\n num_fg,\n gt_matched_classes,\n pred_ious_this_matching,\n matched_gt_inds,\n ) = self.simota_matching(cost, pair_wise_ious, gt_classes, num_gt, fg_mask)\n del pair_wise_cls_loss, cost, pair_wise_ious, pair_wise_ious_loss\n\n if mode == \"cpu\":\n gt_matched_classes = gt_matched_classes.cuda()\n fg_mask = fg_mask.cuda()\n pred_ious_this_matching = pred_ious_this_matching.cuda()\n matched_gt_inds = matched_gt_inds.cuda()\n\n return (\n gt_matched_classes,\n fg_mask,\n pred_ious_this_matching,\n matched_gt_inds,\n num_fg,\n )\n\n def get_geometry_constraint(\n self, gt_bboxes_per_image, expanded_strides, x_shifts, y_shifts,\n ):\n \"\"\"\n Calculate whether the center of an object is located in a fixed range of\n an anchor. This is used to avert inappropriate matching. 
It can also reduce\n the number of candidate anchors so that the GPU memory is saved.\n \"\"\"\n expanded_strides_per_image = expanded_strides[0]\n x_centers_per_image = ((x_shifts[0] + 0.5) * expanded_strides_per_image).unsqueeze(0)\n y_centers_per_image = ((y_shifts[0] + 0.5) * expanded_strides_per_image).unsqueeze(0)\n\n # in fixed center\n center_radius = 1.5\n center_dist = expanded_strides_per_image.unsqueeze(0) * center_radius\n gt_bboxes_per_image_l = (gt_bboxes_per_image[:, 0:1]) - center_dist\n gt_bboxes_per_image_r = (gt_bboxes_per_image[:, 0:1]) + center_dist\n gt_bboxes_per_image_t = (gt_bboxes_per_image[:, 1:2]) - center_dist\n gt_bboxes_per_image_b = (gt_bboxes_per_image[:, 1:2]) + center_dist\n\n c_l = x_centers_per_image - gt_bboxes_per_image_l\n c_r = gt_bboxes_per_image_r - x_centers_per_image\n c_t = y_centers_per_image - gt_bboxes_per_image_t\n c_b = gt_bboxes_per_image_b - y_centers_per_image\n center_deltas = torch.stack([c_l, c_t, c_r, c_b], 2)\n is_in_centers = center_deltas.min(dim=-1).values > 0.0\n anchor_filter = is_in_centers.sum(dim=0) > 0\n geometry_relation = is_in_centers[:, anchor_filter]\n\n return anchor_filter, geometry_relation\n\n def simota_matching(self, cost, pair_wise_ious, gt_classes, num_gt, fg_mask):\n matching_matrix = torch.zeros_like(cost, dtype=torch.uint8)\n\n n_candidate_k = min(10, pair_wise_ious.size(1))\n # close augmention like mosaic will core when dt equals 0\n # https://github.com/Megvii-BaseDetection/YOLOX/issues/778\n topk_ious, _ = torch.topk(pair_wise_ious, n_candidate_k, dim=1)\n dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)\n for gt_idx in range(num_gt):\n _, pos_idx = torch.topk(\n cost[gt_idx], k=dynamic_ks[gt_idx], largest=False\n )\n matching_matrix[gt_idx][pos_idx] = 1\n\n del topk_ious, dynamic_ks, pos_idx\n\n anchor_matching_gt = matching_matrix.sum(0)\n # deal with the case that one anchor matches multiple ground-truths\n if anchor_matching_gt.max() > 1:\n multiple_match_mask = anchor_matching_gt > 1\n _, cost_argmin = torch.min(cost[:, multiple_match_mask], dim=0)\n matching_matrix[:, multiple_match_mask] *= 0\n matching_matrix[cost_argmin, multiple_match_mask] = 1\n fg_mask_inboxes = anchor_matching_gt > 0\n num_fg = fg_mask_inboxes.sum().item()\n\n fg_mask[fg_mask.clone()] = fg_mask_inboxes\n\n matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)\n gt_matched_classes = gt_classes[matched_gt_inds]\n\n pred_ious_this_matching = (matching_matrix * pair_wise_ious).sum(0)[\n fg_mask_inboxes\n ]\n return num_fg, gt_matched_classes, pred_ious_this_matching, matched_gt_inds\n\n def visualize_assign_result(self, xin, labels=None, imgs=None, save_prefix=\"assign_vis_\"):\n # original forward logic\n outputs, x_shifts, y_shifts, expanded_strides = [], [], [], []\n # TODO: use forward logic here.\n\n for k, (cls_conv, reg_conv, stride_this_level, x) in enumerate(\n zip(self.cls_convs, self.reg_convs, self.strides, xin[0])\n ):\n x = self.stems[k](x)\n cls_x = x\n reg_x = x\n\n cls_feat = cls_conv(cls_x)\n cls_output = self.cls_preds[k](cls_feat)\n reg_feat = reg_conv(reg_x)\n reg_output = self.reg_preds[k](reg_feat)\n obj_output = self.obj_preds[k](reg_feat)\n\n output = torch.cat([reg_output, obj_output, cls_output], 1)\n output, grid = self.get_output_and_grid(output, k, stride_this_level, xin[0][0].type())\n x_shifts.append(grid[:, :, 0])\n y_shifts.append(grid[:, :, 1])\n expanded_strides.append(\n torch.full((1, grid.shape[1]), stride_this_level).type_as(xin[0][0])\n )\n outputs.append(output)\n\n 
outputs = torch.cat(outputs, 1)\n bbox_preds = outputs[:, :, :4] # [batch, n_anchors_all, 4]\n obj_preds = outputs[:, :, 4:5] # [batch, n_anchors_all, 1]\n cls_preds = outputs[:, :, 5:] # [batch, n_anchors_all, n_cls]\n\n # calculate targets\n total_num_anchors = outputs.shape[1]\n x_shifts = torch.cat(x_shifts, 1) # [1, n_anchors_all]\n y_shifts = torch.cat(y_shifts, 1) # [1, n_anchors_all]\n expanded_strides = torch.cat(expanded_strides, 1)\n\n nlabel = (labels.sum(dim=2) > 0).sum(dim=1) # number of objects\n for batch_idx, (img, num_gt, label) in enumerate(zip(imgs, nlabel, labels)):\n img = imgs[batch_idx].permute(1, 2, 0).to(torch.uint8)\n num_gt = int(num_gt)\n if num_gt == 0:\n fg_mask = outputs.new_zeros(total_num_anchors).bool()\n else:\n gt_bboxes_per_image = label[:num_gt, 1:5]\n gt_classes = label[:num_gt, 0]\n bboxes_preds_per_image = bbox_preds[batch_idx]\n _, fg_mask, _, matched_gt_inds, _ = self.get_assignments( # noqa\n batch_idx, num_gt, gt_bboxes_per_image, gt_classes,\n bboxes_preds_per_image, expanded_strides, x_shifts,\n y_shifts, cls_preds, obj_preds,\n )\n\n img = img.cpu().numpy().copy() # copy is crucial here\n coords = torch.stack([\n ((x_shifts + 0.5) * expanded_strides).flatten()[fg_mask],\n ((y_shifts + 0.5) * expanded_strides).flatten()[fg_mask],\n ], 1)\n\n xyxy_boxes = cxcywh2xyxy(gt_bboxes_per_image)\n save_name = save_prefix + str(batch_idx) + \".png\"\n img = visualize_assign(img, xyxy_boxes, coords, matched_gt_inds, save_name)\n logger.info(f\"save img to {save_name}\")" }, { "identifier": "YOLOPAFPN", "path": "yolox/models/yolo_pafpn.py", "snippet": "class YOLOPAFPN(nn.Module):\n \"\"\"\n YOLOv3 model. Darknet 53 is the default backbone of this model.\n \"\"\"\n\n def __init__(\n self,\n depth=1.0,\n width=1.0,\n in_features=(\"dark3\", \"dark4\", \"dark5\"),\n in_channels=[256, 512, 1024],\n depthwise=False,\n act=\"silu\",\n ):\n super().__init__()\n self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act)\n self.in_features = in_features\n self.in_channels = in_channels\n Conv = DWConv if depthwise else BaseConv\n\n self.upsample = nn.Upsample(scale_factor=2, mode=\"nearest\")\n self.lateral_conv0 = BaseConv(\n int(in_channels[2] * width), int(in_channels[1] * width), 1, 1, act=act\n )\n self.C3_p4 = CSPLayer(\n int(2 * in_channels[1] * width),\n int(in_channels[1] * width),\n round(3 * depth),\n False,\n depthwise=depthwise,\n act=act,\n ) # cat\n\n self.reduce_conv1 = BaseConv(\n int(in_channels[1] * width), int(in_channels[0] * width), 1, 1, act=act\n )\n self.C3_p3 = CSPLayer(\n int(2 * in_channels[0] * width),\n int(in_channels[0] * width),\n round(3 * depth),\n False,\n depthwise=depthwise,\n act=act,\n )\n\n # bottom-up conv\n self.bu_conv2 = Conv(\n int(in_channels[0] * width), int(in_channels[0] * width), 3, 2, act=act\n )\n self.C3_n3 = CSPLayer(\n int(2 * in_channels[0] * width),\n int(in_channels[1] * width),\n round(3 * depth),\n False,\n depthwise=depthwise,\n act=act,\n )\n\n # bottom-up conv\n self.bu_conv1 = Conv(\n int(in_channels[1] * width), int(in_channels[1] * width), 3, 2, act=act\n )\n self.C3_n4 = CSPLayer(\n int(2 * in_channels[1] * width),\n int(in_channels[2] * width),\n round(3 * depth),\n False,\n depthwise=depthwise,\n act=act,\n )\n\n def forward(self, input):\n \"\"\"\n Args:\n inputs: input images.\n\n Returns:\n Tuple[Tensor]: FPN feature.\n \"\"\"\n\n # backbone\n out_features, apollo_feature = self.backbone(input)\n features = [out_features[f] for f in self.in_features]\n [x2, x1, x0] = features\n\n 
fpn_out0 = self.lateral_conv0(x0) # 1024->512/32\n f_out0 = self.upsample(fpn_out0) # 512/16\n f_out0 = torch.cat([f_out0, x1], 1) # 512->1024/16\n f_out0 = self.C3_p4(f_out0) # 1024->512/16\n\n fpn_out1 = self.reduce_conv1(f_out0) # 512->256/16\n f_out1 = self.upsample(fpn_out1) # 256/8\n f_out1 = torch.cat([f_out1, x2], 1) # 256->512/8\n pan_out2 = self.C3_p3(f_out1) # 512->256/8\n\n p_out1 = self.bu_conv2(pan_out2) # 256->256/16\n p_out1 = torch.cat([p_out1, fpn_out1], 1) # 256->512/16\n pan_out1 = self.C3_n3(p_out1) # 512->512/16\n\n p_out0 = self.bu_conv1(pan_out1) # 512->512/32\n p_out0 = torch.cat([p_out0, fpn_out0], 1) # 512->1024/32\n pan_out0 = self.C3_n4(p_out0) # 1024->1024/32\n\n outputs = (pan_out2, pan_out1, pan_out0)\n # output fpn and feature map for apollo[dark2]\n return outputs, apollo_feature" } ]
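A minimal sketch of the dynamic-k rule used by simota_matching in the YOLOXHead snippet above, where each ground truth receives a number of candidate anchors derived from the sum of its top-10 IoUs; the standalone helper name is hypothetical.

# Illustrative sketch (not part of the dataset record): SimOTA dynamic k from the
# simota_matching snippet; the helper name is hypothetical.
import torch

def dynamic_ks(pair_wise_ious: torch.Tensor, n_candidate_k: int = 10) -> torch.Tensor:
    # pair_wise_ious: [num_gt, num_anchors] IoU between each ground truth and each anchor
    n_candidate_k = min(n_candidate_k, pair_wise_ious.size(1))
    topk_ious, _ = torch.topk(pair_wise_ious, n_candidate_k, dim=1)
    # truncate the summed IoUs to an integer and keep at least one anchor per ground truth
    return torch.clamp(topk_ious.sum(1).int(), min=1)

# e.g. a ground truth whose best IoUs sum to 3.7 is matched to k = 3 candidate anchors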
import torch.nn as nn
from .yolo_head import YOLOXHead
from .yolo_pafpn import YOLOPAFPN
7,728
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) Megvii Inc. All rights reserved.

class YOLOX(nn.Module):
    """
    YOLOX model module. The module list is defined by create_yolov3_modules function.
    The network returns loss values from three YOLO layers during training
    and detection results during test.
    """

    def __init__(self, backbone=None, head=None):
        super().__init__()
        if backbone is None:
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) Megvii Inc. All rights reserved.

class YOLOX(nn.Module):
    """
    YOLOX model module. The module list is defined by create_yolov3_modules function.
    The network returns loss values from three YOLO layers during training
    and detection results during test.
    """

    def __init__(self, backbone=None, head=None):
        super().__init__()
        if backbone is None:
backbone = YOLOPAFPN()
1
2023-11-08 07:07:24+00:00
12k
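A minimal sketch of the grid-and-stride decoding performed by YOLOXHead.decode_outputs in the record above, turning raw per-cell predictions into boxes in input-image coordinates; the standalone function, its argument names, and the example call are assumptions for illustration.

# Illustrative sketch (not part of the dataset record): YOLOX-style output decoding,
# mirroring YOLOXHead.decode_outputs; the function name and arguments are hypothetical.
import torch

def decode_yolox_outputs(outputs: torch.Tensor, hw, strides) -> torch.Tensor:
    # outputs: [batch, n_anchors_all, 5 + num_classes] raw (x, y, w, h, obj, cls...) predictions
    grids, stride_cols = [], []
    for (hsize, wsize), stride in zip(hw, strides):
        yv, xv = torch.meshgrid(torch.arange(hsize), torch.arange(wsize), indexing="ij")
        grid = torch.stack((xv, yv), 2).view(1, -1, 2)
        grids.append(grid)
        stride_cols.append(torch.full((1, grid.shape[1], 1), float(stride)))
    grids = torch.cat(grids, dim=1).type_as(outputs)
    stride_cols = torch.cat(stride_cols, dim=1).type_as(outputs)
    # centers are offset by the grid cell then scaled by the stride;
    # widths and heights are exponentiated then scaled by the stride
    return torch.cat([
        (outputs[..., 0:2] + grids) * stride_cols,
        torch.exp(outputs[..., 2:4]) * stride_cols,
        outputs[..., 4:],
    ], dim=-1)

# e.g. for two FPN levels: decode_yolox_outputs(raw, hw=[(1, 1), (2, 2)], strides=[32, 16])
# where raw has 1*1 + 2*2 = 5 anchors along dimension 1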
ndiamant/spice
experiments/train_eval.py
[ { "identifier": "ConditionalHist", "path": "spice/conditional_histogram.py", "snippet": "class ConditionalHist(BaseLightning):\n def __init__(\n self, input_dim: int, hidden_dim: int,\n max_iter: int, bins: torch.Tensor,\n y_min: float,\n lr: float = 1e-3, wd: float = 0,\n ):\n super().__init__()\n self.save_hyperparameters()\n self.module = nn.Sequential(\n MLP(input_dim, hidden=hidden_dim, n_hidden=1, output_dim=bins.shape[0]),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"log bin probabilities\"\"\"\n return torch.log_softmax(self.module(x), dim=-1)\n\n def log_likelihood(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n \"\"\"log likelihood of y | x\"\"\"\n bin_log_probs = self(x)\n return -F.nll_loss(bin_log_probs, y.squeeze(), reduction=\"none\")\n\n def likelihood(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n return self.log_likelihood(x, y).exp()\n\n def get_loss(self, batch: list[torch.Tensor], prefix: str) -> torch.Tensor:\n x, y = batch\n loss = -self.log_likelihood(x, y).mean()\n self.epoch_log(f\"{prefix}/loss\", loss)\n return loss\n\n @torch.no_grad()\n def find_prob_threshold(self, x_val: torch.Tensor, y_val: torch.Tensor, alpha: float) -> float:\n \"\"\"\n alpha: mis-classification rate\n anything above threshold in likelihood should be in the prediction set\n https://people.eecs.berkeley.edu/~angelopoulos/publications/downloads/gentle_intro_conformal_dfuq.pdf\n \"\"\"\n n = len(y_val)\n q_level = math.ceil((n + 1) * (1 - alpha)) / n\n cal_scores = 1 - self.likelihood(x_val.to(self.device), y_val.to(self.device))\n q_hat = torch.quantile(cal_scores, q_level, interpolation=\"higher\").item()\n return 1 - q_hat\n\n @torch.no_grad()\n def get_extended_bins(self):\n extended_bins = torch.empty(self.hparams.bins.shape[0] + 1)\n extended_bins[0] = self.hparams.y_min\n extended_bins[1:] = self.hparams.bins\n return extended_bins\n\n @torch.no_grad()\n def get_bin_widths(self) -> torch.Tensor:\n extended_bins = self.get_extended_bins()\n return extended_bins[1:] - extended_bins[:-1]\n\n @torch.no_grad()\n def get_metrics(\n self, x_test: torch.Tensor, y_test: torch.Tensor, threshold: float,\n ) -> dict[str, float]:\n test_prob = self(x_test.to(self.device)).exp().to(y_test.device)\n prediction_set = test_prob > threshold\n covered = (\n (\n F.one_hot(y_test.squeeze(), num_classes=self.hparams.bins.shape[0])\n & prediction_set\n ).any(dim=1)\n ).float()\n bin_sizes = self.get_bin_widths()\n sizes = (bin_sizes.unsqueeze(0) * prediction_set).sum(dim=1)\n return compute_conformal_metrics(x_test, y_test.float() / y_test.max().item(), sizes, covered)\n\n @torch.no_grad()\n def get_hpd_threshold(self, x_val: torch.Tensor, y_val: torch.Tensor, alpha: float) -> float:\n x_val = x_val.to(self.device)\n y_val = y_val.to(self.device)\n all_probs = self(x_val).exp()\n y_probs = all_probs.gather(index=y_val, dim=1)\n bin_sizes = self.get_bin_widths()\n score = integrate_categorical_below_threshold(all_probs.cpu(), y_probs.cpu(), bin_sizes.cpu())\n return -score_to_q_hat(-score, alpha)\n\n @torch.no_grad()\n def get_hpd_metrics(\n self, x_test: torch.Tensor, y_test: torch.Tensor, threshold: float,\n ) -> dict[str, float]:\n # HPD\n probs = self(x_test.to(self.device)).exp().cpu()\n bin_sizes = self.get_bin_widths()\n hpd_cutoffs = find_hpd_cutoffs(probs, bin_sizes.cpu(), threshold)\n bin_mask = probs >= hpd_cutoffs.unsqueeze(1)\n # size\n sizes = (bin_sizes.unsqueeze(0) * bin_mask).sum(dim=1)\n y_onehot = F.one_hot(y_test.squeeze(), 
num_classes=self.hparams.bins.shape[0])\n covered = (y_onehot & bin_mask).any(dim=1).float()\n # coverage\n metrics = compute_conformal_metrics(x_test, y_test.float() / y_test.max().item(), sizes, covered)\n metrics = {\n f\"hpd_{name}\": val for name, val in metrics.items()\n }\n return metrics" }, { "identifier": "CHR", "path": "spice/chr.py", "snippet": "class CHR(BaseLightning):\n def __init__(\n self, input_dim: int, hidden_dim: int,\n max_iter: int, n_bins: int,\n lr: float = 1e-3, wd: float = 0,\n y_min: float = 0, y_max: float = 1,\n hist_steps: int = 1000,\n ):\n super().__init__()\n self.save_hyperparameters()\n self.mlp = MLP(input_dim, hidden=hidden_dim, n_hidden=1, output_dim=n_bins)\n self.register_buffer(\"quantiles\", torch.linspace(0.01, 0.99, n_bins))\n self.loss_fn = AllQuantileLoss(quantiles=self.quantiles)\n\n def forward(self, x: torch.Tensor, sort: bool = True) -> torch.Tensor:\n y = self.mlp(x)\n if sort:\n return y.sort(dim=-1).values\n return y\n\n def get_loss(self, batch: list[torch.Tensor], prefix: str) -> torch.Tensor:\n x, y = batch\n pred = self(x)\n loss = self.loss_fn(pred, y)\n self.epoch_log(f\"{prefix}/loss\", loss)\n return loss\n\n @torch.no_grad()\n def calibrate(self, x_val: torch.Tensor, y_val: torch.Tensor, alpha: float):\n chr_ = CHRCalibrate(self.quantiles.squeeze().cpu().numpy(), randomize=False)\n q_calib = self(x_val.to(self.device)).cpu().numpy()\n chr_.calibrate(q_calib=q_calib, Y=y_val.squeeze().cpu().numpy(), alpha=alpha)\n return chr_\n\n @torch.no_grad()\n def get_metrics(\n self, x_test: torch.Tensor, y_test: torch.Tensor, chr: CHRCalibrate,\n ) -> dict[str, float]:\n q_new = self(x_test.to(self.device)).cpu().numpy()\n bands = chr.predict(q_new=q_new)\n y = y_test.squeeze().numpy()\n covered = ((y >= bands[:, 0]) & (y <= bands[:, 1]))\n sizes = torch.tensor(bands[:, 1] - bands[:, 0], dtype=y_test.dtype)\n return compute_conformal_metrics(x_test, y_test, sizes=sizes, covered=torch.tensor(covered))" }, { "identifier": "RegressionData", "path": "spice/datasets.py", "snippet": "class RegressionData(LightningDataModule):\n def __init__(\n self, name: str, y_scaling: str = \"min_max\",\n batch_size: int = 512, discretize_n_bins: int = None,\n train_seed: int = 57771, smart_discretize: bool = True,\n ):\n super().__init__()\n x, y = get_dataset(name)\n y = y.reshape(y.shape[0], 1)\n np.random.seed(112123)\n n = y.shape[0]\n # train, val, calibrate, val calibration, test\n dset_idx = np.random.choice(list(range(5)), p=[0.5, 0.1, 0.1, 0.1, 0.2], size=(n,))\n test_idx = dset_idx == 4\n # shuffle the train split based on the seed\n np.random.seed(train_seed)\n dset_idx[~test_idx] = np.random.permutation(dset_idx[~test_idx])\n train_idx = dset_idx == 0\n val_idx = dset_idx == 1\n cal_idx = dset_idx == 2\n cal_val_idx = dset_idx == 3\n # scaling\n y_scaler = {\n \"min_max\": MinMaxScaler(feature_range=(0, 1 - 1e-5)),\n \"std\": StandardScaler(),\n }[y_scaling]\n y_train = y[train_idx]\n y_scaler.fit(y_train)\n x_train = x[train_idx]\n x_scaler = StandardScaler()\n x_scaler.fit(x_train)\n x = torch.tensor(x_scaler.transform(x), dtype=torch.float32)\n y = torch.tensor(y_scaler.transform(y), dtype=torch.float32)\n # discretize for histogram case\n self.bins = None\n if discretize_n_bins is not None:\n transformed_train_y = torch.tensor(y_scaler.transform(y_train))\n if smart_discretize:\n self.bins = select_bins(transformed_train_y, discretize_n_bins)\n else:\n self.bins = torch.linspace(\n 1 / discretize_n_bins, 1, discretize_n_bins,\n )\n y = 
discretize(y, self.bins)\n train_dset = TensorDataset(x[train_idx], y[train_idx])\n self.train_dset = train_dset\n self.val_dset = TensorDataset(x[val_idx], y[val_idx])\n self.cal_dset = TensorDataset(x[cal_idx], y[cal_idx])\n self.cal_val_dset = TensorDataset(x[cal_val_idx], y[cal_val_idx])\n self.test_dset = TensorDataset(x[test_idx], y[test_idx])\n # save stuff\n self.batch_size = batch_size\n self.x_scaler = x_scaler\n self.y_scaler = y_scaler\n self.y_min_max_scaler = MinMaxScaler(feature_range=(0, 1 - 1e-5)).fit(\n train_dset.tensors[1], # used to keep size evaluations on the same scale\n )\n self.test_idx = test_idx\n\n def train_dataloader(self) -> DataLoader:\n return DataLoader(self.train_dset, shuffle=True, batch_size=self.batch_size)\n\n def val_dataloader(self) -> DataLoader:\n return DataLoader(self.val_dset, shuffle=True, batch_size=self.batch_size)\n\n def test_dataloader(self) -> DataLoader:\n return DataLoader(self.test_dset, shuffle=False, batch_size=self.batch_size)\n\n def train_batches(self, max_batches: int = 100) -> int:\n return min(max_batches, len(self.train_dataloader()))\n\n def val_batches(self, max_batches: int = 10) -> int:\n return min(max_batches, len(self.val_dataloader()))" }, { "identifier": "CQR", "path": "spice/cqr.py", "snippet": "class CQR(BaseLightning):\n \"\"\"conformalized quantile regression\"\"\"\n def __init__(\n self, input_dim: int, hidden_dim: int,\n low_quantile: float, high_quantile: float,\n max_iter: int, lr: float = 1e-3, wd: float = 0,\n ):\n super().__init__()\n self.save_hyperparameters()\n self.module = MLP(\n input_dim=input_dim, hidden=hidden_dim, n_hidden=1, output_dim=2,\n )\n self.quantiles = torch.tensor([low_quantile, high_quantile])\n self.loss_fn = AllQuantileLoss(quantiles=self.quantiles)\n\n def forward(self, x, sort: bool = False) -> torch.Tensor:\n qs = self.module(x)\n if sort:\n qs = qs.sort(dim=-1).values\n return qs\n\n def get_loss(self, batch: list[torch.Tensor], prefix: str) -> torch.Tensor:\n x, y = batch\n y_pred = self(x)\n loss = self.loss_fn(y_pred, y)\n self.epoch_log(f\"{prefix}/loss\", loss)\n return loss\n\n @torch.no_grad()\n def conformity_score(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n pred_quantiles = self(x, sort=True)\n lower = pred_quantiles[:, 0] - y.squeeze()\n upper = y.squeeze() - pred_quantiles[:, 1]\n return torch.maximum(lower, upper)\n\n @torch.no_grad()\n def get_q_hat(self, x_val: torch.Tensor, y_val: torch.Tensor, alpha: float) -> float:\n conf_score = self.conformity_score(x_val.to(self.device), y_val.to(self.device))\n q_hat = score_to_q_hat(conf_score, alpha)\n return q_hat\n\n @torch.no_grad()\n def get_metrics(\n self, x_test: torch.Tensor, y_test: torch.Tensor, q_hat: float,\n ) -> dict[str, float]:\n pred_quantiles = self(x_test.to(self.device), sort=True).cpu()\n left_interval = pred_quantiles[:, 0] - q_hat\n right_interval = pred_quantiles[:, 1] + q_hat\n covered = (\n (y_test.squeeze() > left_interval)\n & (y_test.squeeze() < right_interval)\n )\n sizes = (right_interval - left_interval)\n return compute_conformal_metrics(x_test, y_test, sizes=sizes, covered=covered)\n\n @torch.no_grad()\n def prediction_interval(\n self, x: torch.Tensor, conformity_score: torch.Tensor,\n alpha: float,\n ) -> torch.Tensor:\n \"\"\"alpha is mis-coverage rate\"\"\"\n pred_quantiles = self(x, sort=True)\n n_calibrate = conformity_score.shape[0]\n quantile = conformity_score.quantile(\n (1 - alpha) * (1 + 1 / n_calibrate)\n )\n pred_quantiles[:, 0] -= quantile\n 
pred_quantiles[:, 1] += quantile\n return pred_quantiles" }, { "identifier": "PCP", "path": "spice/pcp.py", "snippet": "class PCP(BaseLightning):\n def __init__(\n self, input_dim: int, hidden_dim: int,\n max_iter: int, lr: float = 1e-3, wd: float = 0,\n n_mixture: int = 10,\n ):\n super().__init__()\n self.save_hyperparameters()\n self.cond_gmm = ConditionalGMM(input_dim, hidden_dim, n_mixture)\n\n def forward(self, x: torch.Tensor) -> D.MixtureSameFamily:\n return self.cond_gmm(x)\n\n def get_loss(self, batch: list[torch.Tensor], prefix: str) -> torch.Tensor:\n x, y = batch\n gmm = self(x)\n log_p = gmm.log_prob(y.squeeze())\n loss = -log_p.nanmean()\n self.epoch_log(f\"{prefix}/loss\", loss)\n return loss\n\n @torch.no_grad()\n def get_filtered_samples(\n self, x: torch.Tensor, k: int = 50, beta: float = 0.2,\n ) -> torch.Tensor:\n gmm = self(x)\n samples = gmm.sample((k,)) # K = 50 x batch_size\n # filter\n densities = gmm.log_prob(samples) # K = 50 x batch_size\n densities_argsort = densities.argsort(dim=0)\n n_filter = int(k * beta)\n keep_idx = densities_argsort[n_filter:] # k = 40 x batch_size\n filtered_samples = samples[keep_idx, torch.arange(x.shape[0])]\n return filtered_samples.T\n\n @torch.no_grad()\n def get_q_hat(self, x_val: torch.Tensor, y_val: torch.Tensor, alpha: float) -> float:\n # https://github.com/Zhendong-Wang/Probabilistic-Conformal-Prediction/blob/54a31cbfe0c87182cbc4351f1d12a59a65452a40/pcp/pcp.py#L28\n n = y_val.shape[0]\n # sample\n filtered_samples = self.get_filtered_samples(x_val.to(self.device)).to(y_val.device)\n # conformal\n score = (filtered_samples - y_val.view(n, 1)).abs().min(dim=1).values\n return score_to_q_hat(score, alpha)\n\n @torch.no_grad()\n def get_prediction_intervals(\n self, x: torch.Tensor, q_hat: float, parallel_workers: int = 0,\n ) -> list[Union]:\n # sample\n filtered_samples = self.get_filtered_samples(x).cpu()\n desc = \"calculating intervals from samples\"\n fn = partial(union_from_samples, q_hat=q_hat)\n fn_in = filtered_samples\n if parallel_workers:\n bands = process_map(\n fn, fn_in, max_workers=parallel_workers, desc=desc,\n chunksize=max(1, min(100, fn_in.shape[0] // (2 * parallel_workers)))\n )\n else:\n bands = list(tqdm(map(fn, fn_in), desc=desc, total=len(fn_in)))\n return bands\n\n @torch.no_grad()\n def get_metrics(\n self, x_test: torch.Tensor, y_test: torch.Tensor, q_hat: float,\n interval_workers: int = 0,\n ) -> dict[str, float]:\n intervals = self.get_prediction_intervals(x_test.to(self.device), q_hat, interval_workers)\n n = y_test.shape[0]\n covered = torch.zeros(n)\n sizes = torch.empty(n)\n for i, (union, yi) in enumerate(tqdm(\n zip(intervals, y_test),\n desc=\"calculating coverage and size\", total=n,\n )):\n sizes[i] = float(union.measure)\n if union.contains(yi.item()):\n covered[i] = 1\n return compute_conformal_metrics(\n x_test, y_test, sizes=sizes, covered=covered,\n )" }, { "identifier": "timestamp", "path": "spice/utils.py", "snippet": "def timestamp() -> str:\n now = datetime.now()\n return now.strftime(\"%Y-%m-%d_%H-%M-%S-%f\")" }, { "identifier": "rename_metrics", "path": "spice/utils.py", "snippet": "def rename_metrics(metrics: dict[str, float], prefix: str, alpha: float) -> dict[str, float]:\n return {\n f\"{prefix}/{name}_at_{alpha}\": val\n for name, val in metrics.items()\n }" }, { "identifier": "WANDB_PROJECT", "path": "spice/utils.py", "snippet": "WANDB_PROJECT = \"spice\"" }, { "identifier": "SPICEn2", "path": "spice/spice_n2.py", "snippet": "class SPICEn2(BaseLightning):\n def 
__init__(\n self, input_dim: int, hidden_dim: int, n_knots: int,\n learn_bin_widths: bool,\n max_iter: int, lr: float = 1e-3, wd: float = 0,\n smart_bin_init_w: torch.Tensor = None, smart_bin_init_h: torch.Tensor = None,\n min_f_bar_val: float = 1e-2,\n ):\n super().__init__()\n self.save_hyperparameters()\n self.encoder = nn.Sequential(\n MLP(input_dim, hidden=hidden_dim, n_hidden=0),\n )\n self.density = ConditionalQuadratic(\n hidden_dim, n_knots, learn_bin_widths=learn_bin_widths,\n min_f_bar_val=min_f_bar_val,\n bin_width_init=smart_bin_init_w, bin_height_init=smart_bin_init_h,\n )\n\n def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n z = self.encoder(x)\n return self.density(z, y.clip(0, 1 - 1e-3))\n\n def get_loss(self, batch: list[torch.Tensor], prefix: str) -> torch.Tensor:\n x, y = batch\n likelihood = self(x, y)\n self.epoch_log(f\"{prefix}/likelihood\", likelihood.mean())\n log_likelihood = never_nan_log(likelihood, eps=1e-5)\n self.epoch_log(f\"{prefix}/log_likelihood\", log_likelihood.mean())\n self.epoch_log(f\"{prefix}/log_likelihood_std\", log_likelihood.std(dim=0))\n self.epoch_log(f\"{prefix}/log_likelihood_min\", log_likelihood.min())\n self.epoch_log(f\"{prefix}/log_likelihood_max\", log_likelihood.max())\n loss = -log_likelihood.mean()\n self.epoch_log(f\"{prefix}/loss\", loss)\n return loss\n\n @torch.no_grad()\n def get_threshold(self, x_val: torch.Tensor, y_val: torch.Tensor, alpha: float) -> float:\n score = -self(x_val.to(self.device), y_val.to(self.device))\n q_hat = score_to_q_hat(score, alpha)\n return -q_hat\n\n @torch.no_grad()\n def get_intervals(self, x: torch.Tensor, cutoff: float) -> tuple[\n tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor, torch.Tensor, torch.Tensor],\n ]:\n z = self.encoder(x)\n (x0, x1), (a, b, c) = self.density.get_quadratic_coeffs(z)\n return (x0, x1), get_intervals(x0, x1, a, b, c, cutoff)\n\n @torch.no_grad()\n def get_metrics(\n self, x_test: torch.Tensor, y_test: torch.Tensor, threshold: float,\n ) -> dict[str, float]:\n test_likelihood = self(x_test.to(self.device), y_test.to(self.device))\n covered = (test_likelihood > threshold).float()\n (x0, x1), (left, right, inside) = self.get_intervals(x_test.to(self.device), threshold)\n sizes = get_interval_sizes(x0, x1, left, right, inside)\n metrics = compute_conformal_metrics(x_test, y_test, sizes, covered)\n metrics[\"approx_size\"] = self.approx_size(x_test, threshold)\n return metrics\n\n @torch.no_grad()\n def approx_size(self, x_test: torch.Tensor, threshold: float):\n y_approx_area = torch.linspace(0, 1, 1000, device=self.device).repeat((x_test.shape[0], 1))\n density_grid = self(\n x_test.to(self.device), y_approx_area,\n )\n return (density_grid > threshold).float().mean().item()\n\n @torch.no_grad()\n def get_hpd_threshold(self, x_val: torch.Tensor, y_val: torch.Tensor, alpha: float) -> float:\n x_val = x_val.to(self.device)\n y_val = y_val.to(self.device)\n z = self.encoder(x_val)\n (x0, x1), (a, b, c) = self.density.get_quadratic_coeffs(z)\n y_density = self(x_val, y_val)\n score = integrate_above_cutoff(x0, x1, a, b, c, y_density)\n q_hat = score_to_q_hat(score, alpha)\n return q_hat\n\n @torch.no_grad()\n def get_hpd_intervals(self, x: torch.Tensor, cutoff: float) -> tuple[\n tuple[torch.Tensor, torch.Tensor, torch.Tensor], tuple[torch.Tensor, torch.Tensor, torch.Tensor],\n ]:\n z = self.encoder(x.to(self.device))\n (x0, x1), (a, b, c) = self.density.get_quadratic_coeffs(z)\n hpd_cutoffs = find_hpd_cutoff(x0.to(self.device), 
x1.to(self.device), a, b, c, cutoff)\n return (x0, x1, hpd_cutoffs), get_intervals(x0, x1, a, b, c, hpd_cutoffs)\n\n @torch.no_grad()\n def get_hpd_metrics(\n self, x_test: torch.Tensor, y_test: torch.Tensor, threshold: float,\n ) -> dict[str, float]:\n (x0, x1, cutoffs), intervals = self.get_hpd_intervals(x_test, threshold)\n sizes = get_interval_sizes(x0, x1, *intervals)\n covered = y_in_interval(y_test.to(x0.device), x0, x1, *intervals)\n metrics = compute_conformal_metrics(x_test, y_test, sizes, covered)\n metrics[\"approx_size\"] = self.approx_size(x_test, cutoffs)\n metrics = {\n f\"hpd_{name}\": val for name, val in metrics.items()\n }\n return metrics" }, { "identifier": "smart_bin_init", "path": "spice/spice_n2.py", "snippet": "def smart_bin_init(y_train: torch.Tensor, n_knots: int) -> tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n return:\n x positions: n_knots - 1\n y_positions: n_knots - 1\n \"\"\"\n quantiles = unique_quantile(y_train.squeeze(), n_knots)\n heights = torch.histogram(y_train.squeeze(), quantiles, density=True).hist\n widths = quantiles[1:] - quantiles[:-1]\n return widths, heights" }, { "identifier": "SPICEn1", "path": "spice/spice_n1.py", "snippet": "class SPICEn1(BaseLightning):\n def __init__(\n self, input_dim: int, hidden_dim: int, n_knots: int,\n learn_bin_widths: bool,\n max_iter: int, lr: float = 1e-3, wd: float = 0,\n bin_width_init: torch.Tensor = None, bin_height_init: torch.Tensor = None,\n min_likelihood: float = 1e-2,\n ):\n super().__init__()\n self.save_hyperparameters()\n self.encoder = nn.Sequential(\n MLP(input_dim, hidden=hidden_dim, n_hidden=0),\n )\n self.density = ConditionalPiecewiseLinearDensity(\n hidden_dim, n_knots, learn_bin_widths=learn_bin_widths,\n min_likelihood=min_likelihood,\n bin_width_init=bin_width_init, bin_height_init=bin_height_init,\n )\n\n def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n z = self.encoder(x)\n return self.density(z, y)\n\n def get_loss(self, batch: list[torch.Tensor], prefix: str) -> torch.Tensor:\n x, y = batch\n likelihood = self(x, y)\n self.epoch_log(f\"{prefix}/likelihood\", likelihood.mean())\n log_likelihood = never_nan_log(likelihood, eps=1e-5)\n self.epoch_log(f\"{prefix}/log_likelihood\", log_likelihood.mean())\n self.epoch_log(f\"{prefix}/log_likelihood_std\", log_likelihood.std(dim=0).mean())\n self.epoch_log(f\"{prefix}/log_likelihood_min\", log_likelihood.min())\n self.epoch_log(f\"{prefix}/log_likelihood_max\", log_likelihood.max())\n loss = -log_likelihood.mean()\n self.epoch_log(f\"{prefix}/loss\", loss)\n return loss\n\n @torch.no_grad()\n def get_threshold(self, x_val: torch.Tensor, y_val: torch.Tensor, alpha: float) -> float:\n score = -self(x_val.to(self.device), y_val.to(self.device))\n q_hat = score_to_q_hat(score, alpha)\n return -q_hat\n\n @torch.no_grad()\n def get_intervals(self, x: torch.Tensor, cutoff: float) -> tuple[torch.Tensor, torch.Tensor]:\n z = self.encoder(x)\n knot_pos, knot_height = self.density.get_knot_pos_height(z)\n return get_intervals(knot_pos, knot_height, cutoff)\n\n @torch.no_grad()\n def get_metrics(\n self, x_test: torch.Tensor, y_test: torch.Tensor, threshold: float,\n ) -> dict[str, float]:\n test_likelihood = self(x_test.to(self.device), y_test.to(self.device))\n covered = (test_likelihood > threshold)\n left, right = self.get_intervals(x_test.to(self.device), threshold)\n sizes = get_interval_sizes(left, right)\n return compute_conformal_metrics(x_test, y_test, sizes, covered)\n\n @torch.no_grad()\n def get_hpd_threshold(self, 
x_val: torch.Tensor, y_val: torch.Tensor, alpha: float) -> float:\n x_val = x_val.to(self.device)\n y_val = y_val.to(self.device)\n z = self.encoder(x_val)\n knot_pos, knot_height = self.density.get_knot_pos_height(z)\n y_density = self(x_val, y_val)\n score = integrate_below_cutoff(knot_pos, knot_height, y_density)\n return -score_to_q_hat(-score, alpha)\n\n @torch.no_grad()\n def get_knots_and_hpd_cutoffs(self, x: torch.Tensor, cutoff: float) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n z = self.encoder(x.to(self.device))\n knot_pos, knot_height = self.density.get_knot_pos_height(z)\n hpd_cutoffs = find_hpd_cutoff(knot_pos.to(self.device), knot_height.to(self.device), cutoff)\n return knot_pos, knot_height, hpd_cutoffs\n\n @torch.no_grad()\n def get_hpd_intervals(self, x: torch.Tensor, cutoff: float) -> tuple[torch.Tensor, torch.Tensor]:\n knot_pos, knot_height, hpd_cutoffs = self.get_knots_and_hpd_cutoffs(x, cutoff)\n return get_intervals(knot_pos, knot_height, hpd_cutoffs)\n\n @torch.no_grad()\n def get_hpd_metrics(\n self, x_test: torch.Tensor, y_test: torch.Tensor, threshold: float,\n ) -> dict[str, float]:\n left, right = self.get_hpd_intervals(x_test, threshold)\n sizes = get_interval_sizes(left, right)\n covered = (\n (y_test >= left.cpu())\n & (y_test < right.cpu())\n ).any(dim=1)\n metrics = compute_conformal_metrics(x_test, y_test, sizes, covered)\n metrics = {\n f\"hpd_{name}\": val for name, val in metrics.items()\n }\n return metrics" }, { "identifier": "smart_bin_init", "path": "spice/spice_n1.py", "snippet": "@torch.no_grad()\ndef smart_bin_init(y_train: torch.Tensor, n_knots: int) -> tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n return:\n x positions: n_knots - 1\n y_positions: n_knots - 1\n \"\"\"\n quantiles = unique_quantile(y_train.squeeze(), n_knots + 1)\n heights = torch.histogram(y_train.squeeze(), bins=quantiles, density=True).hist\n final_heights = heights\n quantiles = unique_quantile(y_train.squeeze(), n_knots)\n widths = quantiles[1:] - quantiles[:-1]\n return widths, final_heights" } ]
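Both SPICE variants above calibrate their density cutoff by scoring a held-out calibration set with the negated likelihood and handing the scores to `score_to_q_hat`, which is not reproduced in this dump. A minimal sketch, assuming the standard split-conformal finite-sample quantile:

```python
import torch

def score_to_q_hat_sketch(score: torch.Tensor, alpha: float) -> float:
    """Assumed behaviour of score_to_q_hat: the (n + 1)(1 - alpha)/n empirical
    quantile of the calibration scores (the usual split-conformal correction)."""
    n = score.numel()
    level = min((n + 1) * (1 - alpha) / n, 1.0)
    return torch.quantile(score.flatten(), level).item()

# Mirrors SPICEn1/SPICEn2.get_threshold: scores are negated likelihoods, so the
# density cutoff handed back to the caller is -q_hat.
cal_scores = -torch.rand(500)            # stand-in for -model(x_cal, y_cal)
threshold = -score_to_q_hat_sketch(cal_scores, alpha=0.1)
print(threshold)
```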
import argparse import os import wandb from pytorch_lightning import seed_everything, Trainer from pytorch_lightning.loggers import WandbLogger from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping, LearningRateMonitor from spice.conditional_histogram import ConditionalHist from spice.chr import CHR from spice.datasets import RegressionData from spice.cqr import CQR from spice.pcp import PCP from spice.utils import timestamp, rename_metrics, WANDB_PROJECT from spice.spice_n2 import SPICEn2, smart_bin_init from spice.spice_n1 import SPICEn1 from spice.spice_n1 import smart_bin_init as spice_n1_smart_bin_init
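The import list pulls in `smart_bin_init` from `spice.spice_n2` (shown in the context above): quantile-spaced bin widths plus histogram heights used to initialize the spline density. A short usage sketch with purely illustrative targets:

```python
import torch
from spice.spice_n2 import smart_bin_init

# Illustrative targets already scaled to [0, 1]; in the runner these would come
# from the training split of the chosen dataset.
y_train = torch.rand(5_000, 1)
widths, heights = smart_bin_init(y_train, n_knots=10)   # each of length n_knots - 1
# These feed SPICEn2 as smart_bin_init_w / smart_bin_init_h so the spline's bins
# start out roughly matched to the marginal distribution of y.
```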
9,236
trainer.fit(model, datamodule=data) model = model.load_from_checkpoint(checkpoint.best_model_path) model: ConditionalHist = model.eval() # run conformal x_cal, y_cal = data.cal_dset.tensors x_cal_val, y_cal_val = data.cal_val_dset.tensors thresholds = [] hpd_thresholds = [] for alpha in alphas: threshold = model.find_prob_threshold(x_cal, y_cal, alpha) thresholds.append(threshold) metrics = model.get_metrics(x_cal_val, y_cal_val, threshold) logger.log_metrics(rename_metrics(metrics, "val", alpha)) # hpd hpd_threshold = model.get_hpd_threshold(x_cal, y_cal, alpha) hpd_thresholds.append(hpd_threshold) metrics = model.get_hpd_metrics(x_cal_val, y_cal_val, hpd_threshold) logger.log_metrics(rename_metrics(metrics, "val", alpha)) # testing if not run_test: wandb.finish() return model, data, thresholds x_test, y_test = data.test_dset.tensors for alpha, threshold, hpd_threshold in zip(alphas, thresholds, hpd_thresholds): thresholds.append(threshold) metrics = model.get_metrics(x_test, y_test, threshold) logger.log_metrics(rename_metrics(metrics, "test", alpha)) # hpd hpd_thresholds.append(hpd_threshold) metrics = model.get_hpd_metrics(x_test, y_test, hpd_threshold) logger.log_metrics(rename_metrics(metrics, "test", alpha)) def run_cqr( dataset_name: str, lr: float, wd: float, epochs: int, hidden: int, seed: int, alphas: list[float], qr_interval: float, # saving settings checkpoint_folder: str, version: str, wandb_log_dir: str, # run_test: bool = False, ): ts = timestamp() name = f"cqr_version-{version}_{ts}" trainer, logger, checkpoint, data = setup_trainer_and_data( name=name, wandb_log_dir=wandb_log_dir, epochs=epochs, version=version, dataset_name=dataset_name, seed=seed, checkpoint_folder=checkpoint_folder, y_scaling="std", ) seed_everything(seed) wandb.config.update({ "dataset_name": dataset_name, "alphas": alphas, "model": "cqr", "qr_interval": qr_interval, "seed": seed, }) # set up model x_dim = data.train_dset.tensors[0].shape[1] low_quantile = round((1 - qr_interval) / 2, 3) high_quantile = 1 - low_quantile model = CQR( input_dim=x_dim, hidden_dim=hidden, lr=lr, wd=wd, max_iter=trainer.max_steps, low_quantile=low_quantile, high_quantile=high_quantile, ) # fit model trainer.fit(model, datamodule=data) model = model.load_from_checkpoint(checkpoint.best_model_path) model: CQR = model.eval() # run conformal x_cal, y_cal = data.cal_dset.tensors x_cal_val, y_cal_val = data.cal_val_dset.tensors q_hats = [] for alpha in alphas: q_hat = model.get_q_hat(x_cal, y_cal, alpha) metrics = model.get_metrics( x_cal_val, y_cal_val, q_hat, ) metrics["size"] /= data.y_min_max_scaler.data_range_.item() logger.log_metrics(rename_metrics(metrics, "val", alpha)) q_hats.append(q_hat) # testing if not run_test: wandb.finish() return model, data, q_hats x_test, y_test = data.test_dset.tensors for alpha, q_hat in zip(alphas, q_hats): metrics = model.get_metrics( x_test, y_test, q_hat, ) metrics["size"] /= data.y_min_max_scaler.data_range_.item() logger.log_metrics(rename_metrics(metrics, "test", alpha)) def run_pcp( dataset_name: str, lr: float, wd: float, epochs: int, hidden: int, seed: int, alphas: list[float], n_mixture: int, # saving settings checkpoint_folder: str, version: str, wandb_log_dir: str, # run_test: bool = False, ): ts = timestamp() name = f"pcp_version-{version}_{ts}" trainer, logger, checkpoint, data = setup_trainer_and_data( name=name, wandb_log_dir=wandb_log_dir, epochs=epochs, version=version, dataset_name=dataset_name, seed=seed, checkpoint_folder=checkpoint_folder, y_scaling="std", ) 
seed_everything(seed) wandb.config.update({ "dataset_name": dataset_name, "alphas": alphas, "model": "pcp", "seed": seed, }) # set up model x_dim = data.train_dset.tensors[0].shape[1]
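`run_cqr` above derives the low/high quantile levels from `qr_interval` and then relies on `CQR.get_q_hat` and `CQR.get_metrics`, whose bodies are not included in this dump. The sketch below is the standard conformalized quantile regression recipe and is only an assumption about what those methods compute:

```python
import torch

# Assumed CQR recipe: the conformity score is the signed distance of y to the
# predicted quantile band, and test-time intervals are the band widened by
# q_hat on both sides.
qr_interval = 0.9
low_quantile = round((1 - qr_interval) / 2, 3)   # 0.05, as computed in run_cqr
high_quantile = 1 - low_quantile                 # 0.95

def cqr_score(y: torch.Tensor, q_lo: torch.Tensor, q_hi: torch.Tensor) -> torch.Tensor:
    return torch.maximum(q_lo - y, y - q_hi)

def cqr_interval(q_lo: torch.Tensor, q_hi: torch.Tensor, q_hat: float):
    return q_lo - q_hat, q_hi + q_hat

# Calibration: q_hat is the (n + 1)(1 - alpha)/n quantile of the scores.
y_cal = torch.randn(300)
q_lo_cal = y_cal - torch.rand(300)               # stand-ins for the quantile heads
q_hi_cal = y_cal + torch.rand(300)
scores = cqr_score(y_cal, q_lo_cal, q_hi_cal)
alpha = 0.1
n = scores.numel()
q_hat = torch.quantile(scores, min((n + 1) * (1 - alpha) / n, 1.0)).item()
```

The runner then divides the reported interval size by `data.y_min_max_scaler.data_range_`, presumably so that sizes from the std-scaled CQR model stay comparable with the methods evaluated on [0, 1]-scaled targets.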
def setup_trainer_and_data( name: str, wandb_log_dir: str, epochs: int, version: str, checkpoint_folder: str, dataset_name: str, seed: int, y_scaling: str = "min_max", discretize_n_bins: int = None, smart_discretize: bool = True, ) -> tuple[Trainer, WandbLogger, ModelCheckpoint, RegressionData]: data = RegressionData( dataset_name, train_seed=seed, y_scaling=y_scaling, discretize_n_bins=discretize_n_bins, smart_discretize=smart_discretize, ) logger = WandbLogger( project=WANDB_PROJECT, save_dir=wandb_log_dir, name=name, group=version, version=f"{version}_{name}", ) checkpoint = ModelCheckpoint( dirpath=os.path.join(checkpoint_folder, name) ) max_steps_per_epoch = 100 max_val_steps = 10 train_batches = data.train_batches(max_steps_per_epoch) trainer = Trainer( logger=logger, callbacks=[ EarlyStopping(monitor="val/loss", patience=epochs // 4, mode="min"), LearningRateMonitor(), checkpoint, ], accelerator="gpu", max_steps=epochs * max_steps_per_epoch, check_val_every_n_epoch=1, limit_train_batches=train_batches, limit_val_batches=data.val_batches(max_val_steps), enable_progress_bar=False, gradient_clip_val=5, log_every_n_steps=train_batches, ) return trainer, logger, checkpoint, data def run_conditional_histogram( dataset_name: str, lr: float, wd: float, epochs: int, hidden: int, n_bins: int, seed: int, alphas: list[float], smart_bin_positions: bool, # saving settings checkpoint_folder: str, version: str, wandb_log_dir: str, # run_test: bool = False, ): # set up data ts = timestamp() name = f"conditional_hist_version-{version}_{ts}" trainer, logger, checkpoint, data = setup_trainer_and_data( name=name, wandb_log_dir=wandb_log_dir, epochs=epochs, version=version, dataset_name=dataset_name, seed=seed, checkpoint_folder=checkpoint_folder, discretize_n_bins=n_bins, smart_discretize=smart_bin_positions, ) seed_everything(seed) wandb.config.update({ "dataset_name": dataset_name, "alphas": alphas, "model": "conditional_hist", "n_bins": n_bins, "smart_bin_positions": smart_bin_positions, "seed": seed, }) # set up model x_train, y_train = data.train_dset.tensors model = ConditionalHist( input_dim=x_train.shape[1], hidden_dim=hidden, bins=data.bins, lr=lr, wd=wd, max_iter=trainer.max_steps, y_min=0.0, ) # fit model trainer.fit(model, datamodule=data) model = model.load_from_checkpoint(checkpoint.best_model_path) model: ConditionalHist = model.eval() # run conformal x_cal, y_cal = data.cal_dset.tensors x_cal_val, y_cal_val = data.cal_val_dset.tensors thresholds = [] hpd_thresholds = [] for alpha in alphas: threshold = model.find_prob_threshold(x_cal, y_cal, alpha) thresholds.append(threshold) metrics = model.get_metrics(x_cal_val, y_cal_val, threshold) logger.log_metrics(rename_metrics(metrics, "val", alpha)) # hpd hpd_threshold = model.get_hpd_threshold(x_cal, y_cal, alpha) hpd_thresholds.append(hpd_threshold) metrics = model.get_hpd_metrics(x_cal_val, y_cal_val, hpd_threshold) logger.log_metrics(rename_metrics(metrics, "val", alpha)) # testing if not run_test: wandb.finish() return model, data, thresholds x_test, y_test = data.test_dset.tensors for alpha, threshold, hpd_threshold in zip(alphas, thresholds, hpd_thresholds): thresholds.append(threshold) metrics = model.get_metrics(x_test, y_test, threshold) logger.log_metrics(rename_metrics(metrics, "test", alpha)) # hpd hpd_thresholds.append(hpd_threshold) metrics = model.get_hpd_metrics(x_test, y_test, hpd_threshold) logger.log_metrics(rename_metrics(metrics, "test", alpha)) def run_cqr( dataset_name: str, lr: float, wd: float, epochs: int, hidden: 
int, seed: int, alphas: list[float], qr_interval: float, # saving settings checkpoint_folder: str, version: str, wandb_log_dir: str, # run_test: bool = False, ): ts = timestamp() name = f"cqr_version-{version}_{ts}" trainer, logger, checkpoint, data = setup_trainer_and_data( name=name, wandb_log_dir=wandb_log_dir, epochs=epochs, version=version, dataset_name=dataset_name, seed=seed, checkpoint_folder=checkpoint_folder, y_scaling="std", ) seed_everything(seed) wandb.config.update({ "dataset_name": dataset_name, "alphas": alphas, "model": "cqr", "qr_interval": qr_interval, "seed": seed, }) # set up model x_dim = data.train_dset.tensors[0].shape[1] low_quantile = round((1 - qr_interval) / 2, 3) high_quantile = 1 - low_quantile model = CQR( input_dim=x_dim, hidden_dim=hidden, lr=lr, wd=wd, max_iter=trainer.max_steps, low_quantile=low_quantile, high_quantile=high_quantile, ) # fit model trainer.fit(model, datamodule=data) model = model.load_from_checkpoint(checkpoint.best_model_path) model: CQR = model.eval() # run conformal x_cal, y_cal = data.cal_dset.tensors x_cal_val, y_cal_val = data.cal_val_dset.tensors q_hats = [] for alpha in alphas: q_hat = model.get_q_hat(x_cal, y_cal, alpha) metrics = model.get_metrics( x_cal_val, y_cal_val, q_hat, ) metrics["size"] /= data.y_min_max_scaler.data_range_.item() logger.log_metrics(rename_metrics(metrics, "val", alpha)) q_hats.append(q_hat) # testing if not run_test: wandb.finish() return model, data, q_hats x_test, y_test = data.test_dset.tensors for alpha, q_hat in zip(alphas, q_hats): metrics = model.get_metrics( x_test, y_test, q_hat, ) metrics["size"] /= data.y_min_max_scaler.data_range_.item() logger.log_metrics(rename_metrics(metrics, "test", alpha)) def run_pcp( dataset_name: str, lr: float, wd: float, epochs: int, hidden: int, seed: int, alphas: list[float], n_mixture: int, # saving settings checkpoint_folder: str, version: str, wandb_log_dir: str, # run_test: bool = False, ): ts = timestamp() name = f"pcp_version-{version}_{ts}" trainer, logger, checkpoint, data = setup_trainer_and_data( name=name, wandb_log_dir=wandb_log_dir, epochs=epochs, version=version, dataset_name=dataset_name, seed=seed, checkpoint_folder=checkpoint_folder, y_scaling="std", ) seed_everything(seed) wandb.config.update({ "dataset_name": dataset_name, "alphas": alphas, "model": "pcp", "seed": seed, }) # set up model x_dim = data.train_dset.tensors[0].shape[1]
model = PCP(
4
2023-11-01 18:04:29+00:00
12k
nik-sm/com-hom-emg
scripts/collect_fresh_classifier_stats.py
[ { "identifier": "DataModule", "path": "com_hom_emg/data.py", "snippet": "class DataModule(LightningDataModule):\n @staticmethod\n def add_argparse_args(parent_parser):\n parser = parent_parser.add_argument_group(\"DataModule\")\n parser.add_argument(\"--fold\", type=int, required=True)\n parser.add_argument(\"--n_train_subj\", type=int, default=8)\n parser.add_argument(\"--n_val_subj\", type=int, default=1)\n parser.add_argument(\"--n_test_subj\", type=int, default=1)\n parser.add_argument(\"--batch_size\", type=int, default=128)\n parser.add_argument(\"--num_workers\", type=int, default=8)\n parser.add_argument(\"--use_preprocessed_data\", type=str2bool, default=False)\n return parent_parser\n\n def __init__(\n self,\n *,\n # seed and per_subj_data come from cli\n seed: int,\n per_subj_data: dict,\n #\n fold: int,\n n_train_subj: int,\n n_val_subj: int,\n n_test_subj: int,\n batch_size: int,\n num_workers: int,\n use_preprocessed_data: bool,\n **kw,\n ):\n \"\"\"\n From N subjects, we select 1 for val, 1 for test, and N-2 for train.\n In each set, data are merged and shuffled.\n While loading, we distinguish single and double gestures for easier splitting during train steps.\n \"\"\"\n super().__init__()\n self.train_set, self.val_set, self.test_set = get_datasets(\n per_subj_data, fold, n_train_subj, n_val_subj, n_test_subj, use_preprocessed_data\n )\n self.batch_size = batch_size\n self.num_workers = num_workers\n self.seed = seed\n self.example_data_shape = self.train_set.tensors[0][0].shape\n\n def get_loader(self, dataset, shuffle: bool):\n return DataLoader(\n dataset,\n shuffle=shuffle,\n pin_memory=True,\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n worker_init_fn=seed_worker,\n generator=torch.Generator().manual_seed(self.seed),\n persistent_workers=True,\n )\n\n def train_dataloader(self):\n return self.get_loader(self.train_set, shuffle=True)\n\n def val_dataloader(self):\n return self.get_loader(self.val_set, shuffle=False)\n\n def test_dataloader(self):\n return self.get_loader(self.test_set, shuffle=False)" }, { "identifier": "get_per_subj_data", "path": "com_hom_emg/data.py", "snippet": "def get_per_subj_data():\n path = PROJECT_PATH / \"data\" / \"combination-gesture-dataset\" / \"python\"\n per_subj_data = {}\n for subj_idx in range(10):\n per_subj_data[subj_idx] = {\n \"data\": np.load(path / f\"subj{subj_idx}/data.npy\"),\n \"labels\": np.load(path / f\"subj{subj_idx}/labels.npy\"),\n }\n return per_subj_data" }, { "identifier": "LearnedEmbedding", "path": "com_hom_emg/model.py", "snippet": "class LearnedEmbedding(pl.LightningModule):\n @staticmethod\n def add_argparse_args(parent_parser):\n parser = parent_parser.add_argument_group(\"LearnedEmbedding\")\n parser.add_argument(\"--encoder_arch\", choices=[\"basic\", \"conformer\", \"vit\", \"identity\"], default=\"basic\")\n parser.add_argument(\"--clf_arch\", choices=[\"small\", \"large\"], default=\"small\")\n parser.add_argument(\"--feature_dim\", type=int, default=64)\n # Note that with normalized features, we might need to re-normalized after making combinations\n parser.add_argument(\"--data_noise_SNR\", type=float, default=None, help=\"Desired SNR in dB. None for no noise.\")\n parser.add_argument(\n \"--feature_noise_SNR\", type=float, default=None, help=\"Desired SNR in dB. 
None for no noise.\"\n )\n parser.add_argument(\"--normalized_features\", type=str2bool, default=False)\n parser.add_argument(\"--feature_combine_type\", choices=[\"avg\", \"mlp\"], default=\"avg\")\n parser.add_argument(\"--lr\", type=float, default=3e-4)\n parser.add_argument(\"--lr_decay\", type=float, default=1.0)\n parser.add_argument(\"--linearity_loss_coeff\", type=float, default=1.0)\n parser.add_argument(\"--real_CE_loss_coeff\", type=float, default=1.0)\n parser.add_argument(\"--fake_CE_loss_coeff\", type=float, default=1.0)\n parser.add_argument(\"--loss_type\", choices=[\"triplet\", \"triplet-centroids\", \"triplet-hard\"], default=\"triplet\")\n parser.add_argument(\"--margin\", type=float, default=1.0)\n parser.add_argument(\"--centroids_momentum\", type=float, default=0.75, help=\"For `triplet-centroids` loss\")\n parser.add_argument(\"--triplets_per_item\", type=int, default=1, help=\"For `triplet` loss\")\n\n parser = parent_parser.add_argument_group(\"LearnedEmbedding - Fine-tuning\")\n parser.add_argument(\"--finetune_steps\", type=int, default=10_000)\n parser.add_argument(\"--finetune_lr\", type=float, default=3e-5)\n parser.add_argument(\"--finetune_lr_decay\", type=float, default=1.0)\n parser.add_argument(\"--finetune_batch_size\", type=float, default=32)\n parser.add_argument(\"--finetune_test_frac\", type=float, default=0.2)\n parser.add_argument(\"--finetune_n_aug_per_class\", type=int, default=-1, help=\"-1 for all, positive for N\")\n return parent_parser\n\n def __init__(self, **kwargs):\n super().__init__()\n self.save_hyperparameters() # Access arg from command line \"--arg1\" at \"self.hparams.arg1\", etc\n\n # NOTE - self.example_input_array - magic pytorch lightning variable for tboard log_graph\n self.example_input_array = torch.ones(1, self.hparams.input_channels, self.hparams.input_time_length)\n if self.hparams.encoder_arch == \"basic\":\n self.embedding = EmbeddingNetwork(\n input_channels=self.hparams.input_channels,\n input_time_length=self.hparams.input_time_length,\n feature_dim=self.hparams.feature_dim,\n normalized_features=self.hparams.normalized_features,\n use_preprocessed_data=self.hparams.use_preprocessed_data,\n )\n elif self.hparams.encoder_arch == \"conformer\":\n self.embedding = Conformer(\n feature_dim=self.hparams.feature_dim,\n normalized_features=self.hparams.normalized_features,\n )\n elif self.hparams.encoder_arch == \"vit\":\n vit = SimpleViT(\n seq_len=962,\n channels=8,\n patch_size=37,\n num_classes=self.hparams.feature_dim,\n dim=256,\n depth=6,\n heads=8,\n mlp_dim=256,\n )\n if self.hparams.normalized_features:\n vit = nn.Sequential(vit, UnitNormLayer())\n self.embedding = vit\n elif self.hparams.arch == \"identity\":\n self.embedding = DummyIdentity()\n else:\n raise NotImplementedError()\n if self.hparams.clf_arch == \"small\":\n self.direction_clf = nn.Linear(self.hparams.feature_dim, 5)\n self.modifier_clf = nn.Linear(self.hparams.feature_dim, 5)\n elif self.hparams.clf_arch == \"large\":\n self.direction_clf = MLPClf(self.hparams.feature_dim, 5)\n self.modifier_clf = MLPClf(self.hparams.feature_dim, 5)\n if self.hparams.loss_type == \"triplet\":\n self.linearity_loss_fn = TripletLoss(\n margin=self.hparams.margin,\n triplets_per_item=self.hparams.triplets_per_item,\n )\n elif self.hparams.loss_type == \"triplet-centroids\":\n self.linearity_loss_fn = TripletCentroids(\n margin=self.hparams.margin,\n feature_dim=self.hparams.feature_dim,\n device=\"cuda\" if self.hparams.accelerator == \"gpu\" else \"cpu\",\n 
momentum=self.hparams.centroids_momentum,\n )\n elif self.hparams.loss_type == \"triplet-hard\":\n self.linearity_loss_fn = TripletLossHardMining(\n margin=self.hparams.margin,\n )\n else:\n logger.error(f\"Unknown loss type: {self.hparams.loss_type}\")\n raise NotImplementedError()\n if self.hparams.feature_combine_type == \"avg\":\n # Store on self so it will be detected as additional params\n combine_fn = Avg()\n elif self.hparams.feature_combine_type == \"mlp\":\n combine_fn = MLPCombine(feature_dim=self.hparams.feature_dim)\n self.feature_combination = CombinePairs(\n combine_fn=combine_fn, normalized_features=self.hparams.normalized_features\n )\n\n def forward(self, preprocessed_emg_data):\n features = self.embedding(preprocessed_emg_data)\n return features\n\n def training_step(self, batch, batch_idx):\n (data, labels, is_single, subj_ids) = batch\n # Add noise to each class separately to reach the desired SNR\n if self.hparams.data_noise_SNR is not None:\n with torch.no_grad():\n for label in labels.unique(dim=0):\n subset_idx = (labels == label).all(-1)\n subset = data[subset_idx]\n data[subset_idx] = subset + get_noise(subset, self.hparams.data_noise_SNR)\n\n # Compute features for real data\n real_features = self.embedding(data)\n # Add noise to features\n if self.hparams.feature_noise_SNR is not None:\n for label in labels.unique(dim=0):\n subset_idx = (labels == label).all(-1)\n subset = real_features[subset_idx]\n real_features[subset_idx] = subset + get_noise(subset, self.hparams.feature_noise_SNR)\n\n # Create fake double features features from real singles\n single_features = real_features[is_single]\n single_labels = labels[is_single]\n try:\n fake_double_features, fake_double_labels = self.feature_combination(single_features, single_labels)\n except InsufficientDataError:\n logger.warning(\"Insufficient data for augmentation. Skipping batch.\")\n return None\n\n # Isolate real double features from batch\n real_double_features, real_double_labels = real_features[~is_single], labels[~is_single]\n if len(real_double_features) == 0:\n logger.warning(\"No real double features in batch. Skipping batch.\")\n return None\n if len(fake_double_features) == 0:\n logger.warning(\"No fake double features in batch. 
Skipping batch.\")\n return None\n\n # Compute linearity loss\n linearity_loss = self.linearity_loss_fn(\n real_double_features=real_double_features,\n real_double_labels=real_double_labels,\n fake_double_features=fake_double_features,\n fake_double_labels=fake_double_labels,\n )\n\n # Compute classification loss on real data\n real_dir_logits = self.direction_clf(real_features)\n CE_real_dir = F.cross_entropy(real_dir_logits, labels[:, 0])\n bal_acc_real_dir = accuracy(\n real_dir_logits.argmax(-1), labels[:, 0], task=\"multiclass\", num_classes=5, average=\"macro\"\n )\n\n real_mod_logits = self.modifier_clf(real_features)\n CE_real_mod = F.cross_entropy(real_mod_logits, labels[:, 1])\n bal_acc_real_mod = accuracy(\n real_mod_logits.argmax(-1), labels[:, 1], task=\"multiclass\", num_classes=5, average=\"macro\"\n )\n\n # Compute classification loss on fake combinations\n fake_dir_logits = self.direction_clf(fake_double_features)\n CE_fake_dir = F.cross_entropy(fake_dir_logits, fake_double_labels[:, 0])\n bal_acc_fake_dir = accuracy(\n fake_dir_logits.argmax(-1), fake_double_labels[:, 0], task=\"multiclass\", num_classes=5, average=\"macro\"\n )\n\n fake_mod_logits = self.modifier_clf(fake_double_features)\n CE_fake_mod = F.cross_entropy(fake_mod_logits, fake_double_labels[:, 1])\n bal_acc_fake_mod = accuracy(\n fake_mod_logits.argmax(-1), fake_double_labels[:, 1], task=\"multiclass\", num_classes=5, average=\"macro\"\n )\n\n # Decrease emphasis on fake CE so they have equal importance\n down_ratio = len(real_features) / len(fake_double_features)\n real_CE = self.hparams.real_CE_loss_coeff * (CE_real_dir + CE_real_mod) / 2\n fake_CE = down_ratio * self.hparams.fake_CE_loss_coeff * (CE_fake_dir + CE_fake_mod) / 2\n lin_loss = self.hparams.linearity_loss_coeff * linearity_loss\n total_loss = real_CE + fake_CE + lin_loss\n\n # Log individual loss terms (before applying coefficients)\n self.log(\"train/CE_real_dir\", CE_real_dir)\n self.log(\"train/CE_real_mod\", CE_real_mod)\n self.log(\"train/CE_fake_dir\", CE_fake_dir)\n self.log(\"train/CE_fake_mod\", CE_fake_mod)\n self.log(\"train/linearity_loss\", linearity_loss)\n tb = self.logger.experiment\n tb.add_histogram(\"train/real_double_feature_norm\", real_double_features.norm(dim=-1), self.global_step)\n tb.add_histogram(\"train/fake_double_feature_norm\", fake_double_features.norm(dim=-1), self.global_step)\n\n # Log total loss\n self.log(\"train/total_loss\", total_loss)\n\n # Log balanced accuracies\n self.log(\"train/bal_acc_real_dir\", bal_acc_real_dir)\n self.log(\"train/bal_acc_real_mod\", bal_acc_real_mod)\n self.log(\"train/bal_acc_fake_dir\", bal_acc_fake_dir)\n self.log(\"train/bal_acc_fake_mod\", bal_acc_fake_mod)\n return total_loss\n\n def training_epoch_end(self, outputs):\n metrics = {k: v.item() if isinstance(v, torch.Tensor) else v for k, v in self.trainer.callback_metrics.items()}\n metrics = {f\"{k}\": f\"{v:.4f}\" for k, v in metrics.items()}\n logger.info(f\"Epoch: {self.current_epoch}, Metrics: {metrics}\")\n\n def _val_or_test_step(self, batch, name=None):\n (data, labels, is_single, subj_ids) = batch\n\n # Compute metrics on real data\n real_features = self.embedding(data)\n real_dir_logits = self.direction_clf(real_features)\n real_mod_logits = self.modifier_clf(real_features)\n real_preds = torch.stack((real_dir_logits.argmax(-1), real_mod_logits.argmax(-1)), dim=-1)\n real_cm = get_combo_conf_mat(labels, real_preds)\n\n # To be clear that fake data is not part of the result, compute result before making fake 
data\n res = {\"features\": real_features, \"labels\": labels, \"is_single\": is_single, \"subj_ids\": subj_ids}\n\n # Compute metrics on fake data\n single_features = real_features[is_single]\n single_labels = labels[is_single]\n try:\n fake_double_features, fake_double_labels = self.feature_combination(single_features, single_labels)\n except InsufficientDataError:\n logger.warning(\"Insufficient data for augmentation. Skipping batch.\")\n return None\n fake_dir_logits = self.direction_clf(fake_double_features)\n fake_mod_logits = self.modifier_clf(fake_double_features)\n\n fake_preds = torch.stack((fake_dir_logits.argmax(-1), fake_mod_logits.argmax(-1)), dim=-1)\n fake_cm = get_combo_conf_mat(fake_double_labels, fake_preds)\n if name is not None:\n self.log(f\"{name}/single_bal_acc\", np.nanmean(np.diag(real_cm)[:8]))\n self.log(f\"{name}/double_bal_acc\", np.nanmean(np.diag(real_cm)[8:]))\n self.log(f\"{name}/overall_bal_acc\", np.nanmean(np.diag(real_cm)[:24]))\n self.log(f\"{name}/fake_double_bal_acc\", np.nanmean(np.diag(fake_cm)[8:]))\n return res\n\n def validation_step(self, batch, batch_idx):\n self._val_or_test_step(batch, \"val\")\n\n def test_step(self, batch, batch_idx):\n return self._val_or_test_step(batch, None)\n\n @torch.enable_grad()\n @torch.inference_mode(False)\n def test_epoch_end(self, outputs):\n features = torch.cat([x[\"features\"] for x in outputs])\n labels = torch.cat([x[\"labels\"] for x in outputs])\n is_single = torch.cat([x[\"is_single\"] for x in outputs])\n subj_ids = torch.cat([x[\"subj_ids\"] for x in outputs])\n\n combined_evaluation = self.run_finetune_evaluation(features, labels, is_single, subj_ids)\n scalars = [\"single_bal_acc\", \"double_bal_acc\", \"overall_bal_acc\"]\n for scenario in [\"zero_shot\", \"upper_bound\", \"lower_bound\", \"augmented\"]:\n for key in scalars:\n value = combined_evaluation[scenario][key]\n self.log(f\"test_{scenario}/{key}\", value, sync_dist=True)\n\n # Save confusion matrix\n path = Path(self.logger.log_dir)\n np.save(path / f\"test.{scenario}.confusion_matrix.npy\", combined_evaluation[scenario][\"confusion_matrix\"])\n # TODO - how else can we get output from pytorch lightning's trainer.test()?\n return None\n\n def run_finetune_evaluation(self, features, labels, is_single, subj_ids):\n logger.info(\"Try evaluation by fine-tuning pre-trained dir_clf and mod_clf\")\n # Freeze the feature combination fn, just to be safe\n for param in self.feature_combination.parameters():\n param.requires_grad = False\n self.feature_combination.eval()\n\n evaluations = []\n for subj_id in subj_ids.unique():\n logger.info(f\"Fine-tuning evaluation for subject {subj_id}\")\n # Get subset of features and labels for this subject\n idx = subj_ids == subj_id\n evaluations.append(\n self.run_finetune_one_subj(features=features[idx], labels=labels[idx], is_single=is_single[idx])\n )\n\n combined_evaluation = {}\n for key in [\"upper_bound\", \"lower_bound\", \"augmented\", \"zero_shot\"]:\n combined_evaluation[key] = {\n \"single_bal_acc\": np.mean([x[key][\"single_bal_acc\"] for x in evaluations]),\n \"double_bal_acc\": np.mean([x[key][\"double_bal_acc\"] for x in evaluations]),\n \"overall_bal_acc\": np.mean([x[key][\"overall_bal_acc\"] for x in evaluations]),\n \"confusion_matrix\": np.mean([x[key][\"confusion_matrix\"] for x in evaluations], axis=0),\n }\n return combined_evaluation\n\n def run_finetune_one_subj(self, features, labels, is_single):\n # Split into train/test\n N_single = is_single.sum().item()\n N_single_test = 
int(N_single * self.hparams.finetune_test_frac)\n\n N_double = (~is_single).sum().item()\n N_double_test = int(N_double * self.hparams.finetune_test_frac)\n\n np.random.seed(0)\n single_perm = np.random.permutation(N_single)\n test_single_feat = features[is_single][single_perm[:N_single_test]]\n test_single_labels = labels[is_single][single_perm[:N_single_test]]\n train_single_feat = features[is_single][single_perm[N_single_test:]]\n train_single_labels = labels[is_single][single_perm[N_single_test:]]\n\n double_perm = np.random.permutation(N_double)\n test_double_feat = features[~is_single][double_perm[:N_double_test]]\n test_double_labels = labels[~is_single][double_perm[:N_double_test]]\n train_double_feat = features[~is_single][double_perm[N_double_test:]]\n train_double_labels = labels[~is_single][double_perm[N_double_test:]]\n\n def try_once(which: str):\n logger.info(f\"Finetune for scenario: {which}\")\n aug = {\"upper\": None, \"lower\": None, \"aug\": self.feature_combination}[which]\n doubles_in_train = {\"upper\": True, \"lower\": False, \"aug\": False}[which]\n\n # Setup train data\n logger.debug(f\"real singles: {len(train_single_feat)}\")\n logger.debug(f\"real doubles: {len(train_double_feat)}\")\n if doubles_in_train:\n x_train = torch.cat((train_single_feat, train_double_feat))\n y_train = torch.cat((train_single_labels, train_double_labels))\n else:\n x_train = train_single_feat\n y_train = train_single_labels\n if aug is not None:\n x_aug, y_aug = aug(train_single_feat, train_single_labels)\n if self.hparams.finetune_n_aug_per_class > 0:\n # Subset each class\n res_x, res_y = [], []\n for c in y_aug.unique(dim=0):\n idx = (y_aug == c).all(dim=1)\n perm = np.random.permutation(idx.sum().item())\n res_x.append(x_aug[idx][perm[: self.hparams.finetune_n_aug_per_class]])\n res_y.append(y_aug[idx][perm[: self.hparams.finetune_n_aug_per_class]])\n x_aug, y_aug = torch.cat(res_x), torch.cat(res_y)\n logger.debug(f\"n_aug_per_class: {self.hparams.finetune_n_aug_per_class}\")\n logger.debug(f\"fake doubles: {x_aug.shape[0]}\")\n x_train = torch.cat([x_train, x_aug])\n y_train = torch.cat([y_train, y_aug])\n\n x_train, y_train = shuffle_together(x_train, y_train)\n\n # Setup test data\n x_test = torch.cat([test_single_feat, test_double_feat])\n y_test = torch.cat([test_single_labels, test_double_labels])\n x_test, y_test = shuffle_together(x_test, y_test)\n\n # Make a temporary copy of the models\n dir_clf = deepcopy(self.direction_clf)\n mod_clf = deepcopy(self.modifier_clf)\n optim = torch.optim.AdamW(chain(dir_clf.parameters(), mod_clf.parameters()), lr=self.hparams.finetune_lr)\n sched = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=self.hparams.finetune_lr_decay)\n # Since the features are already on GPU, can't use multiprocess dataloader\n bs = self.hparams.finetune_batch_size\n train_loader = DataLoader(TensorDataset(x_train, y_train), batch_size=bs, shuffle=True, num_workers=0)\n test_loader = DataLoader(TensorDataset(x_test, y_test), batch_size=bs, shuffle=False, num_workers=0)\n\n def infinite_cycle(loader):\n while True:\n for x, y in loader:\n yield x, y\n\n inf_train_loader = infinite_cycle(train_loader)\n\n @torch.no_grad()\n def test():\n dir_clf.eval()\n mod_clf.eval()\n dir_logits, mod_logits, y_test = [], [], []\n for x, y in test_loader:\n dir_logits.append(dir_clf(x))\n mod_logits.append(mod_clf(x))\n y_test.append(y)\n dir_logits = torch.cat(dir_logits)\n mod_logits = torch.cat(mod_logits)\n y_test = torch.cat(y_test)\n preds = 
torch.stack((dir_logits.argmax(-1), mod_logits.argmax(-1)), dim=-1)\n cm = get_combo_conf_mat(y_test, preds)\n return {\n \"single_bal_acc\": np.nanmean(np.diag(cm)[:8]),\n \"double_bal_acc\": np.nanmean(np.diag(cm)[8:]),\n \"overall_bal_acc\": np.nanmean(np.diag(cm)[:24]),\n \"confusion_matrix\": cm,\n }\n\n zero_shot_res = test() # Test once with no fine-tuning\n logger.debug(f\"Zero-shot results: {zero_shot_res}\")\n tb = self.logger.experiment\n # Graphs will start with the zero-shot result\n scalars = [\"single_bal_acc\", \"double_bal_acc\", \"overall_bal_acc\"]\n for k in scalars:\n v = zero_shot_res[k]\n tb.add_scalar(f\"finetune/{which}/{k}\", v, 0) # Start at x-axis=0\n # Continue graphs from 1 onward\n dir_clf.train()\n mod_clf.train()\n for i in range(1, self.hparams.finetune_steps + 1):\n x, y = next(inf_train_loader)\n optim.zero_grad()\n dir_logits = dir_clf(x)\n mod_logits = mod_clf(x)\n dir_loss = F.cross_entropy(dir_logits, y[:, 0])\n mod_loss = F.cross_entropy(mod_logits, y[:, 1])\n loss = dir_loss + mod_loss\n loss.backward()\n optim.step()\n\n if i % 100 == 0:\n finetuned_res = test()\n logger.debug(f\"Step {i} results: {finetuned_res}\")\n for k in scalars:\n v = finetuned_res[k]\n tb.add_scalar(f\"finetune/{which}/{k}\", v, i)\n dir_clf.train()\n mod_clf.train()\n sched.step()\n\n finetuned_res = test()\n\n return finetuned_res, zero_shot_res\n\n upper, zero_shot_res = try_once(\"upper\")\n lower, _ = try_once(\"lower\")\n aug, _ = try_once(\"aug\")\n return {\"zero_shot\": zero_shot_res, \"upper_bound\": upper, \"lower_bound\": lower, \"augmented\": aug}\n\n def configure_optimizers(self):\n optim = torch.optim.AdamW(self.embedding.parameters(), lr=self.hparams.lr)\n sched = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=self.hparams.lr_decay)\n return {\"optimizer\": optim, \"lr_scheduler\": {\"scheduler\": sched, \"name\": \"lr_sched\"}}" }, { "identifier": "ControlModel_RandomGuess", "path": "com_hom_emg/parallel_models.py", "snippet": "class ControlModel_RandomGuess(BaseParallelModel):\n def __init__(self, *args, **kw):\n pass\n\n def fit(self, x, y):\n return self\n\n def predict_proba(self, x):\n # Create random probs output with correct shape\n # Note that the probabilities should be normalized along the final axis\n # This is the same axis where we'll choose one prediction\n probs = np.random.rand(x.shape[0], 2, 5)\n probs /= np.sum(probs, axis=-1, keepdims=True)\n return probs\n\n def predict(self, x):\n return self.predict_proba(x).argmax(-1)\n\n def save(self, path):\n pass\n\n @classmethod\n def load(cls, path):\n pass\n\n def get_params(self, deep=True):\n return {}\n\n def __repr__(self):\n return f\"{type(self).__name__}()\"" }, { "identifier": "ParallelA", "path": "com_hom_emg/parallel_models.py", "snippet": "class ParallelA(BaseParallelModel):\n DEFAULT_SAVE_NAME = \"ParallelA.pkl\"\n\n def __init__(self, dir_clf, mod_clf):\n self.dir_clf = dir_clf\n self.mod_clf = mod_clf\n\n def get_params(self, deep=True):\n return {\"dir_clf\": self.dir_clf, \"mod_clf\": self.mod_clf}\n\n def fit(self, x, y):\n self.dir_clf.fit(x, y[:, 0])\n self.mod_clf.fit(x, y[:, 1])\n return self\n\n def predict_proba(self, x):\n prob0 = self.dir_clf.predict_proba(x)\n prob1 = self.mod_clf.predict_proba(x)\n return np.stack([prob0, prob1], axis=1)\n\n def predict(self, x):\n return self.predict_proba(x).argmax(-1)\n\n def save(self, save_dir: Path) -> Path:\n assert save_dir.exists() and save_dir.is_dir()\n file_path = save_dir / self.DEFAULT_SAVE_NAME\n with 
open(file_path, \"wb\") as f:\n pickle.dump(self, f)\n return file_path\n\n @classmethod\n def load(cls, file_path: Path) -> \"ParallelA\":\n with open(file_path, \"rb\") as f:\n return pickle.load(f)\n\n def __repr__(self):\n return f\"{type(self).__name__}(dir_clf={self.dir_clf}, mod_clf={self.mod_clf})\"" }, { "identifier": "get_combo_conf_mat", "path": "com_hom_emg/scoring.py", "snippet": "def get_combo_conf_mat(y_true_2d, y_pred_2d, normalize=True):\n \"\"\"We get a confusion matrix of shape (25, 25). Row is true class, col is predicted.\n Entries are arranged like this:\n (D1, None), ..., (D4, None), (None, M1), ..., (None, M4), (D1, M1), ...\n (D1, M4), (D2, M1), ... (D2, M4), ... (D4, M4), (None, None)\n where D1 ... D4 are directions in order of appearance from DIRECTION_GESTURES\n and M1 ... M4 are modifiers in order of appearance from MODIFIER_GESTURES.\n This means the first 4 rows are each \"direction-only\" label, next 4 are \"modifier-only\" labels.\"\"\"\n cm = np.zeros((len(CANONICAL_COORDS), len(CANONICAL_COORDS)))\n for yt, yp in zip(y_true_2d, y_pred_2d):\n cm[CANONICAL_COORDS.index(tuple(yt)), CANONICAL_COORDS.index(tuple(yp))] += 1\n if normalize:\n # NOTE - result may contain nans - use nanmean later\n with np.errstate(all=\"ignore\"): # Ignore division by zero for empty rows\n cm /= cm.sum(axis=-1, keepdims=True)\n return cm" }, { "identifier": "PROJECT_PATH", "path": "com_hom_emg/utils.py", "snippet": "PROJECT_PATH = Path(__file__).parent.parent" } ]
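The context above also defines `ParallelA` (independent direction and modifier classifiers sharing one feature vector) and `get_combo_conf_mat` (a 25x25 confusion matrix over (direction, modifier) combinations). A small usage sketch on hypothetical random features; the label encoding (both slots as indices in 0..4) mirrors how the script below feeds these helpers:

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

from com_hom_emg.parallel_models import ParallelA
from com_hom_emg.scoring import get_combo_conf_mat

# Hypothetical data: 64-d features, labels are (direction, modifier) index pairs in 0..4.
rng = np.random.default_rng(0)
x = rng.standard_normal((400, 64))
y = rng.integers(0, 5, size=(400, 2))

model = ParallelA(
    dir_clf=LogisticRegression(max_iter=1000),
    mod_clf=LogisticRegression(max_iter=1000),
).fit(x, y)

cm = get_combo_conf_mat(y, model.predict(x))    # rows: true class, cols: predicted
single_bal_acc = np.nanmean(np.diag(cm)[:8])    # direction-only / modifier-only classes
double_bal_acc = np.nanmean(np.diag(cm)[8:])    # combination classes, as in try_once below
```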
import argparse import re import sys import numpy as np import pandas as pd import torch import yaml from copy import deepcopy from pathlib import Path from typing import List, Optional from ablation_settings import settings_names as ablation_settings_names from loguru import logger from pytorch_lightning import seed_everything from regular_settings import settings_names as regular_settings_names from rich.console import Console from rich.table import Table from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA from sklearn.ensemble import RandomForestClassifier as RF from sklearn.linear_model import LogisticRegression as LogR from sklearn.neighbors import KNeighborsClassifier as KNN from sklearn.tree import DecisionTreeClassifier as DT from tqdm import tqdm from utils import table_to_csv from com_hom_emg.data import DataModule, get_per_subj_data from com_hom_emg.model import LearnedEmbedding from com_hom_emg.parallel_models import ControlModel_RandomGuess, ParallelA from com_hom_emg.scoring import get_combo_conf_mat from com_hom_emg.utils import PROJECT_PATH
9,105
result_x.append(x) result_y.append(y) return torch.cat(result_x), torch.cat(result_y) def get_clf(name: str): if name == "logr": return LogR(class_weight="balanced", max_iter=4000, n_jobs=-1) elif name == "lda": return LDA() elif name == "knn": return KNN(n_jobs=-1) elif name == "rf": return RF(n_jobs=-1, class_weight="balanced") elif name == "dt": return DT(class_weight="balanced") else: raise ValueError(f"Unknown classifier name: {name}") @torch.no_grad() def try_fresh_classifier(embedding, test_loader, clf_name: str, test_frac=0.2, N_aug_each_class=500): # Get features embedding.to(device) features, labels, is_single = [], [], [] for batch_data, batch_labels, batch_is_single, _subj_ids in test_loader: features.append(embedding(batch_data.to(device))) labels.append(batch_labels.to(device)) is_single.append(batch_is_single) features = torch.cat(features) labels = torch.cat(labels) is_single = torch.cat(is_single) # Create a single train/test split N_single = is_single.sum().item() N_single_test = int(N_single * test_frac) N_double = (~is_single).sum().item() N_double_test = int(N_double * test_frac) np.random.seed(0) single_perm = np.random.permutation(N_single) test_single_feat = features[is_single][single_perm[:N_single_test]] test_single_labels = labels[is_single][single_perm[:N_single_test]] train_single_feat = features[is_single][single_perm[N_single_test:]] train_single_labels = labels[is_single][single_perm[N_single_test:]] double_perm = np.random.permutation(N_double) test_double_feat = features[~is_single][double_perm[:N_double_test]] test_double_labels = labels[~is_single][double_perm[:N_double_test]] train_double_feat = features[~is_single][double_perm[N_double_test:]] train_double_labels = labels[~is_single][double_perm[N_double_test:]] # Define function to train a single sklearn clf def try_once(which: str): # logger.info(f"Train an example model for scenario: {which}") clf = ParallelA(dir_clf=get_clf(clf_name), mod_clf=get_clf(clf_name)) control = ControlModel_RandomGuess() model = {"upper": clf, "lower": clf, "aug": clf, "control": control}[which] use_aug = {"upper": False, "lower": False, "aug": True, "control": False}[which] doubles_in_train = {"upper": True, "lower": False, "aug": False, "control": True}[which] if doubles_in_train: x_train = torch.cat((train_single_feat, train_double_feat)) y_train = torch.cat((train_single_labels, train_double_labels)) else: x_train = train_single_feat y_train = train_single_labels if use_aug: x_aug, y_aug = embedding.feature_combination(train_single_feat, train_single_labels) # logger.info(f"Real singles: {len(x_train)}, augmented: {len(x_aug)}") if N_aug_each_class is not None: x_aug, y_aug = subset_each_class(x_aug, y_aug, N_aug_each_class) # logger.info(f"Subset augmented: {len(x_aug)}") x_train = torch.cat([x_train, x_aug]) y_train = torch.cat([y_train, y_aug]) x_test = torch.cat([test_single_feat, test_double_feat]) y_test = torch.cat([test_single_labels, test_double_labels]) # After (possibly) applying augmentation fn - then we can convert to numpy # That way, augmentation fn can assume its input to be torch tensor x_train = x_train.cpu().numpy() y_train = y_train.cpu().numpy() x_test = x_test.cpu().numpy() y_test = y_test.cpu().numpy() model.fit(x_train, y_train) preds = model.predict(x_test) cm = get_combo_conf_mat(y_test, preds) cm_counts = get_combo_conf_mat(y_test, preds, normalize=False) single_bal_acc = np.nanmean(np.diag(cm)[:8]) double_bal_acc = np.nanmean(np.diag(cm)[8:]) overall_bal_acc = np.nanmean(np.diag(cm)) return 
{ "single_bal_acc": single_bal_acc, "double_bal_acc": double_bal_acc, "overall_bal_acc": overall_bal_acc, "confusion_matrix": cm, "confusion_matrix_counts": cm_counts, } return { # Train once with singles and doubles (upper-bound performance) "upper_bound": try_once("upper"), # Train once with only singles, no augmentation (lower-bound performance) "lower_bound": try_once("lower"), # Train once with singles only and augmentation "augmented": try_once("aug"), # Train once with a random model (lower-bound performance) # "control": try_once("control"), } def fresh_classifier_one_ckpt(ckpt, clf_name: str, n_aug: Optional[int]): embedding = LearnedEmbedding.load_from_checkpoint(ckpt) embedding.eval() per_subj_data = get_per_subj_data()
"""Train fresh classifiers using test checkpoints""" device = torch.device("cuda" if torch.cuda.is_available() else "cpu") logger.remove() logger.add(lambda msg: tqdm.write(msg, end=""), colorize=True) class FailedRunError(Exception): pass def load_one(folder: Path, which="best"): """Extract train, val, and test metrics from the specified checkpoint (best or last). Also extract hyperparams from hparams.yaml file.""" # Given a checkpoint like this: best__epoch=38__step=14664__val_aug_overall_acc=0.569.ckpt # We want to extract the step: 14664 ckpts = folder / "checkpoints" matching_ckpts = list(ckpts.glob(f"{which}*.ckpt")) if len(matching_ckpts) == 0: raise FailedRunError(f"No checkpoint found for {which} in {folder}") # When there are multiple runs, take the most recent. # Since only 1 metrics.csv is kept, this matches the latest ckpt chosen_ckpt = max(matching_ckpts, key=lambda x: x.stat().st_mtime) step = int(re.match(rf"{which}__epoch=\d+__step=(\d+)", chosen_ckpt.name).group(1)) metrics = pd.read_csv(folder / "metrics.csv") results = {} # NOTE - for this experiment, we ignore the test results, which come from fine-tuning, # since we will train a fresh classifier instead for split in ["train", "val"]: cols = [col for col in metrics.columns if col.startswith(split)] if len(cols) == 0: raise FailedRunError(f"No {split} metrics found in {folder}") cols.append("step") subset = metrics[cols].dropna().set_index("step") subset = subset.iloc[subset.index.get_indexer([step], method="nearest")] assert len(subset) == 1 results.update(**subset.to_dict(orient="records")[0]) hparams = yaml.safe_load((folder / "hparams.yaml").read_text()) return hparams, results, chosen_ckpt def subset_one_class(X, Y, N): idx = np.random.choice(len(X), size=N, replace=False) return X[idx], Y[idx] def subset_each_class(X, Y, N): result_x, result_y = [], [] for y in Y.unique(dim=0): idx = (Y == y).all(-1) x = X[idx] y = Y[idx] x, y = subset_one_class(x, y, N) result_x.append(x) result_y.append(y) return torch.cat(result_x), torch.cat(result_y) def get_clf(name: str): if name == "logr": return LogR(class_weight="balanced", max_iter=4000, n_jobs=-1) elif name == "lda": return LDA() elif name == "knn": return KNN(n_jobs=-1) elif name == "rf": return RF(n_jobs=-1, class_weight="balanced") elif name == "dt": return DT(class_weight="balanced") else: raise ValueError(f"Unknown classifier name: {name}") @torch.no_grad() def try_fresh_classifier(embedding, test_loader, clf_name: str, test_frac=0.2, N_aug_each_class=500): # Get features embedding.to(device) features, labels, is_single = [], [], [] for batch_data, batch_labels, batch_is_single, _subj_ids in test_loader: features.append(embedding(batch_data.to(device))) labels.append(batch_labels.to(device)) is_single.append(batch_is_single) features = torch.cat(features) labels = torch.cat(labels) is_single = torch.cat(is_single) # Create a single train/test split N_single = is_single.sum().item() N_single_test = int(N_single * test_frac) N_double = (~is_single).sum().item() N_double_test = int(N_double * test_frac) np.random.seed(0) single_perm = np.random.permutation(N_single) test_single_feat = features[is_single][single_perm[:N_single_test]] test_single_labels = labels[is_single][single_perm[:N_single_test]] train_single_feat = features[is_single][single_perm[N_single_test:]] train_single_labels = labels[is_single][single_perm[N_single_test:]] double_perm = np.random.permutation(N_double) test_double_feat = features[~is_single][double_perm[:N_double_test]] test_double_labels 
= labels[~is_single][double_perm[:N_double_test]] train_double_feat = features[~is_single][double_perm[N_double_test:]] train_double_labels = labels[~is_single][double_perm[N_double_test:]] # Define function to train a single sklearn clf def try_once(which: str): # logger.info(f"Train an example model for scenario: {which}") clf = ParallelA(dir_clf=get_clf(clf_name), mod_clf=get_clf(clf_name)) control = ControlModel_RandomGuess() model = {"upper": clf, "lower": clf, "aug": clf, "control": control}[which] use_aug = {"upper": False, "lower": False, "aug": True, "control": False}[which] doubles_in_train = {"upper": True, "lower": False, "aug": False, "control": True}[which] if doubles_in_train: x_train = torch.cat((train_single_feat, train_double_feat)) y_train = torch.cat((train_single_labels, train_double_labels)) else: x_train = train_single_feat y_train = train_single_labels if use_aug: x_aug, y_aug = embedding.feature_combination(train_single_feat, train_single_labels) # logger.info(f"Real singles: {len(x_train)}, augmented: {len(x_aug)}") if N_aug_each_class is not None: x_aug, y_aug = subset_each_class(x_aug, y_aug, N_aug_each_class) # logger.info(f"Subset augmented: {len(x_aug)}") x_train = torch.cat([x_train, x_aug]) y_train = torch.cat([y_train, y_aug]) x_test = torch.cat([test_single_feat, test_double_feat]) y_test = torch.cat([test_single_labels, test_double_labels]) # After (possibly) applying augmentation fn - then we can convert to numpy # That way, augmentation fn can assume its input to be torch tensor x_train = x_train.cpu().numpy() y_train = y_train.cpu().numpy() x_test = x_test.cpu().numpy() y_test = y_test.cpu().numpy() model.fit(x_train, y_train) preds = model.predict(x_test) cm = get_combo_conf_mat(y_test, preds) cm_counts = get_combo_conf_mat(y_test, preds, normalize=False) single_bal_acc = np.nanmean(np.diag(cm)[:8]) double_bal_acc = np.nanmean(np.diag(cm)[8:]) overall_bal_acc = np.nanmean(np.diag(cm)) return { "single_bal_acc": single_bal_acc, "double_bal_acc": double_bal_acc, "overall_bal_acc": overall_bal_acc, "confusion_matrix": cm, "confusion_matrix_counts": cm_counts, } return { # Train once with singles and doubles (upper-bound performance) "upper_bound": try_once("upper"), # Train once with only singles, no augmentation (lower-bound performance) "lower_bound": try_once("lower"), # Train once with singles only and augmentation "augmented": try_once("aug"), # Train once with a random model (lower-bound performance) # "control": try_once("control"), } def fresh_classifier_one_ckpt(ckpt, clf_name: str, n_aug: Optional[int]): embedding = LearnedEmbedding.load_from_checkpoint(ckpt) embedding.eval() per_subj_data = get_per_subj_data()
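`load_one` in the script above recovers the trainer step from the checkpoint filename and then picks the matching row of `metrics.csv`. The filename parsing in isolation, using the example name from its docstring:

```python
import re

# Filename format documented in load_one's docstring.
name = "best__epoch=38__step=14664__val_aug_overall_acc=0.569.ckpt"
step = int(re.match(r"best__epoch=\d+__step=(\d+)", name).group(1))
assert step == 14664
```

The nearest-row lookup (`get_indexer([step], method="nearest")`) is then needed because logged metric rows do not necessarily land on exactly the checkpointed step.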
datamodule = DataModule(per_subj_data=per_subj_data, **embedding.hparams)
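The completion target rebuilds `DataModule` from the checkpoint's stored hparams, and the DataModule docstring above states that one subject is held out for validation and one for test, with the rest used for training. `get_datasets` is not included in this dump, so the rotation below is only an assumed illustration of that kind of fold-based split:

```python
def split_subjects_sketch(n_subj: int, fold: int):
    # Illustrative leave-subjects-out rotation (assumed, not the repo's get_datasets).
    test_subj = fold % n_subj
    val_subj = (fold + 1) % n_subj
    train_subjs = [s for s in range(n_subj) if s not in (test_subj, val_subj)]
    return train_subjs, val_subj, test_subj

print(split_subjects_sketch(n_subj=10, fold=0))   # ([2, 3, ..., 9], 1, 0)
```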
0
2023-11-01 21:12:05+00:00
12k
SqueezeAILab/LLMCompiler
src/chains/llm_math_chain.py
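The context below reproduces the `Chain` base class that LLMCompiler vendors from langchain v0.0.283. As a quick orientation to the contract its docstrings describe (`input_keys`, `output_keys`, `_call`, and the `__call__`/`run` entry points), here is a toy subclass; it is illustrative only and is not the repo's actual math chain:

```python
from typing import Any, Dict, List, Optional

from langchain.callbacks.manager import CallbackManagerForChainRun

from src.chains.chain import Chain


class EchoChain(Chain):
    """Toy subclass showing the minimal Chain contract: declare input/output
    keys and implement _call. Illustrative only -- not the repo's math chain."""

    @property
    def input_keys(self) -> List[str]:
        return ["question"]

    @property
    def output_keys(self) -> List[str]:
        return ["answer"]

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        return {"answer": f"echo: {inputs['question']}"}


# Both entry points described in the docstrings behave as documented:
# EchoChain()({"question": "2+2"}) -> {"question": "2+2", "answer": "echo: 2+2"}
# EchoChain().run("2+2")           -> "echo: 2+2"
```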
[ { "identifier": "Chain", "path": "src/chains/chain.py", "snippet": "class Chain(Serializable, Runnable[Dict[str, Any], Dict[str, Any]], ABC):\n \"\"\"Abstract base class for creating structured sequences of calls to components.\n\n Chains should be used to encode a sequence of calls to components like\n models, document retrievers, other chains, etc., and provide a simple interface\n to this sequence.\n\n Copied from langchain v0.0.283.\n\n The Chain interface makes it easy to create apps that are:\n - Stateful: add Memory to any Chain to give it state,\n - Observable: pass Callbacks to a Chain to execute additional functionality,\n like logging, outside the main sequence of component calls,\n - Composable: the Chain API is flexible enough that it is easy to combine\n Chains with other components, including other Chains.\n\n The main methods exposed by chains are:\n - `__call__`: Chains are callable. The `__call__` method is the primary way to\n execute a Chain. This takes inputs as a dictionary and returns a\n dictionary output.\n - `run`: A convenience method that takes inputs as args/kwargs and returns the\n output as a string or object. This method can only be used for a subset of\n chains and cannot return as rich of an output as `__call__`.\n \"\"\"\n\n def invoke(\n self,\n input: Dict[str, Any],\n config: Optional[RunnableConfig] = None,\n **kwargs: Any,\n ) -> Dict[str, Any]:\n config = config or {}\n return self(\n input,\n callbacks=config.get(\"callbacks\"),\n tags=config.get(\"tags\"),\n metadata=config.get(\"metadata\"),\n run_name=config.get(\"run_name\"),\n **kwargs,\n )\n\n async def ainvoke(\n self,\n input: Dict[str, Any],\n config: Optional[RunnableConfig] = None,\n **kwargs: Any,\n ) -> Dict[str, Any]:\n if type(self)._acall == Chain._acall:\n # If the chain does not implement async, fall back to default implementation\n return await asyncio.get_running_loop().run_in_executor(\n None, partial(self.invoke, input, config, **kwargs)\n )\n\n config = config or {}\n return await self.acall(\n input,\n callbacks=config.get(\"callbacks\"),\n tags=config.get(\"tags\"),\n metadata=config.get(\"metadata\"),\n run_name=config.get(\"run_name\"),\n **kwargs,\n )\n\n memory: Optional[BaseMemory] = None\n \"\"\"Optional memory object. Defaults to None.\n Memory is a class that gets called at the start\n and at the end of every chain. At the start, memory loads variables and passes\n them along in the chain. At the end, it saves any returned variables.\n There are many different types of memory - please see memory docs\n for the full catalog.\"\"\"\n callbacks: Callbacks = Field(default=None, exclude=True)\n \"\"\"Optional list of callback handlers (or callback manager). Defaults to None.\n Callback handlers are called throughout the lifecycle of a call to a chain,\n starting with on_chain_start, ending with on_chain_end or on_chain_error.\n Each custom chain can optionally call additional callback methods, see Callback docs\n for full details.\"\"\"\n callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)\n \"\"\"Deprecated, use `callbacks` instead.\"\"\"\n verbose: bool = Field(default_factory=_get_verbosity)\n \"\"\"Whether or not run in verbose mode. In verbose mode, some intermediate logs\n will be printed to the console. Defaults to `langchain.verbose` value.\"\"\"\n tags: Optional[List[str]] = None\n \"\"\"Optional list of tags associated with the chain. 
Defaults to None.\n These tags will be associated with each call to this chain,\n and passed as arguments to the handlers defined in `callbacks`.\n You can use these to eg identify a specific instance of a chain with its use case.\n \"\"\"\n metadata: Optional[Dict[str, Any]] = None\n \"\"\"Optional metadata associated with the chain. Defaults to None.\n This metadata will be associated with each call to this chain,\n and passed as arguments to the handlers defined in `callbacks`.\n You can use these to eg identify a specific instance of a chain with its use case.\n \"\"\"\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n arbitrary_types_allowed = True\n\n @property\n def _chain_type(self) -> str:\n raise NotImplementedError(\"Saving not supported for this chain type.\")\n\n @root_validator()\n def raise_callback_manager_deprecation(cls, values: Dict) -> Dict:\n \"\"\"Raise deprecation warning if callback_manager is used.\"\"\"\n if values.get(\"callback_manager\") is not None:\n if values.get(\"callbacks\") is not None:\n raise ValueError(\n \"Cannot specify both callback_manager and callbacks. \"\n \"callback_manager is deprecated, callbacks is the preferred \"\n \"parameter to pass in.\"\n )\n warnings.warn(\n \"callback_manager is deprecated. Please use callbacks instead.\",\n DeprecationWarning,\n )\n values[\"callbacks\"] = values.pop(\"callback_manager\", None)\n return values\n\n @validator(\"verbose\", pre=True, always=True)\n def set_verbose(cls, verbose: Optional[bool]) -> bool:\n \"\"\"Set the chain verbosity.\n\n Defaults to the global setting if not specified by the user.\n \"\"\"\n if verbose is None:\n return _get_verbosity()\n else:\n return verbose\n\n @property\n @abstractmethod\n def input_keys(self) -> List[str]:\n \"\"\"Keys expected to be in the chain input.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def output_keys(self) -> List[str]:\n \"\"\"Keys expected to be in the chain output.\"\"\"\n raise NotImplementedError\n\n def _validate_inputs(self, inputs: Dict[str, Any]) -> None:\n \"\"\"Check that all inputs are present.\"\"\"\n missing_keys = set(self.input_keys).difference(inputs)\n if missing_keys:\n raise ValueError(f\"Missing some input keys: {missing_keys}\")\n\n def _validate_outputs(self, outputs: Dict[str, Any]) -> None:\n missing_keys = set(self.output_keys).difference(outputs)\n if missing_keys:\n raise ValueError(f\"Missing some output keys: {missing_keys}\")\n\n @abstractmethod\n def _call(\n self,\n inputs: Dict[str, Any],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> Dict[str, Any]:\n \"\"\"Execute the chain.\n\n This is a private method that is not user-facing. It is only called within\n `Chain.__call__`, which is the user-facing wrapper method that handles\n callbacks configuration and some input/output processing.\n\n Args:\n inputs: A dict of named inputs to the chain. Assumed to contain all inputs\n specified in `Chain.input_keys`, including any inputs added by memory.\n run_manager: The callbacks manager that contains the callback handlers for\n this run of the chain.\n\n Returns:\n A dict of named outputs. Should contain all outputs specified in\n `Chain.output_keys`.\n \"\"\"\n raise NotImplementedError\n\n async def _acall(\n self,\n inputs: Dict[str, Any],\n run_manager: Optional[AsyncCallbackManagerForChainRun] = None,\n ) -> Dict[str, Any]:\n \"\"\"Asynchronously execute the chain.\n\n This is a private method that is not user-facing. 
It is only called within\n `Chain.acall`, which is the user-facing wrapper method that handles\n callbacks configuration and some input/output processing.\n\n Args:\n inputs: A dict of named inputs to the chain. Assumed to contain all inputs\n specified in `Chain.input_keys`, including any inputs added by memory.\n run_manager: The callbacks manager that contains the callback handlers for\n this run of the chain.\n\n Returns:\n A dict of named outputs. Should contain all outputs specified in\n `Chain.output_keys`.\n \"\"\"\n raise NotImplementedError(\"Async call not supported for this chain type.\")\n\n def __call__(\n self,\n inputs: Union[Dict[str, Any], Any],\n return_only_outputs: bool = False,\n callbacks: Callbacks = None,\n *,\n tags: Optional[List[str]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n run_name: Optional[str] = None,\n include_run_info: bool = False,\n ) -> Dict[str, Any]:\n \"\"\"Execute the chain.\n\n Args:\n inputs: Dictionary of inputs, or single input if chain expects\n only one param. Should contain all inputs specified in\n `Chain.input_keys` except for inputs that will be set by the chain's\n memory.\n return_only_outputs: Whether to return only outputs in the\n response. If True, only new keys generated by this chain will be\n returned. If False, both input keys and new keys generated by this\n chain will be returned. Defaults to False.\n callbacks: Callbacks to use for this chain run. These will be called in\n addition to callbacks passed to the chain during construction, but only\n these runtime callbacks will propagate to calls to other objects.\n tags: List of string tags to pass to all callbacks. These will be passed in\n addition to tags passed to the chain during construction, but only\n these runtime tags will propagate to calls to other objects.\n metadata: Optional metadata associated with the chain. Defaults to None\n include_run_info: Whether to include run info in the response. Defaults\n to False.\n\n Returns:\n A dict of named outputs. Should contain all outputs specified in\n `Chain.output_keys`.\n \"\"\"\n inputs = self.prep_inputs(inputs)\n callback_manager = CallbackManager.configure(\n callbacks,\n self.callbacks,\n self.verbose,\n tags,\n self.tags,\n metadata,\n self.metadata,\n )\n new_arg_supported = inspect.signature(self._call).parameters.get(\"run_manager\")\n run_manager = callback_manager.on_chain_start(\n dumpd(self),\n inputs,\n name=run_name,\n )\n try:\n outputs = (\n self._call(inputs, run_manager=run_manager)\n if new_arg_supported\n else self._call(inputs)\n )\n except (KeyboardInterrupt, Exception) as e:\n run_manager.on_chain_error(e)\n raise e\n run_manager.on_chain_end(outputs)\n final_outputs: Dict[str, Any] = self.prep_outputs(\n inputs, outputs, return_only_outputs\n )\n if include_run_info:\n final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)\n return final_outputs\n\n async def acall(\n self,\n inputs: Union[Dict[str, Any], Any],\n return_only_outputs: bool = False,\n callbacks: Callbacks = None,\n *,\n tags: Optional[List[str]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n run_name: Optional[str] = None,\n include_run_info: bool = False,\n ) -> Dict[str, Any]:\n \"\"\"Asynchronously execute the chain.\n\n Args:\n inputs: Dictionary of inputs, or single input if chain expects\n only one param. Should contain all inputs specified in\n `Chain.input_keys` except for inputs that will be set by the chain's\n memory.\n return_only_outputs: Whether to return only outputs in the\n response. 
If True, only new keys generated by this chain will be\n returned. If False, both input keys and new keys generated by this\n chain will be returned. Defaults to False.\n callbacks: Callbacks to use for this chain run. These will be called in\n addition to callbacks passed to the chain during construction, but only\n these runtime callbacks will propagate to calls to other objects.\n tags: List of string tags to pass to all callbacks. These will be passed in\n addition to tags passed to the chain during construction, but only\n these runtime tags will propagate to calls to other objects.\n metadata: Optional metadata associated with the chain. Defaults to None\n include_run_info: Whether to include run info in the response. Defaults\n to False.\n\n Returns:\n A dict of named outputs. Should contain all outputs specified in\n `Chain.output_keys`.\n \"\"\"\n inputs = self.prep_inputs(inputs)\n callback_manager = AsyncCallbackManager.configure(\n callbacks,\n self.callbacks,\n self.verbose,\n tags,\n self.tags,\n metadata,\n self.metadata,\n )\n new_arg_supported = inspect.signature(self._acall).parameters.get(\"run_manager\")\n run_manager = await callback_manager.on_chain_start(\n dumpd(self),\n inputs,\n name=run_name,\n )\n try:\n outputs = (\n await self._acall(inputs, run_manager=run_manager)\n if new_arg_supported\n else await self._acall(inputs)\n )\n except (KeyboardInterrupt, Exception) as e:\n await run_manager.on_chain_error(e)\n raise e\n await run_manager.on_chain_end(outputs)\n final_outputs: Dict[str, Any] = self.prep_outputs(\n inputs, outputs, return_only_outputs\n )\n if include_run_info:\n final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)\n return final_outputs\n\n def prep_outputs(\n self,\n inputs: Dict[str, str],\n outputs: Dict[str, str],\n return_only_outputs: bool = False,\n ) -> Dict[str, str]:\n \"\"\"Validate and prepare chain outputs, and save info about this run to memory.\n\n Args:\n inputs: Dictionary of chain inputs, including any inputs added by chain\n memory.\n outputs: Dictionary of initial chain outputs.\n return_only_outputs: Whether to only return the chain outputs. If False,\n inputs are also added to the final outputs.\n\n Returns:\n A dict of the final chain outputs.\n \"\"\"\n self._validate_outputs(outputs)\n if self.memory is not None:\n self.memory.save_context(inputs, outputs)\n if return_only_outputs:\n return outputs\n else:\n return {**inputs, **outputs}\n\n def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]:\n \"\"\"Validate and prepare chain inputs, including adding inputs from memory.\n\n Args:\n inputs: Dictionary of raw inputs, or single input if chain expects\n only one param. Should contain all inputs specified in\n `Chain.input_keys` except for inputs that will be set by the chain's\n memory.\n\n Returns:\n A dictionary of all inputs, including those added by the chain's memory.\n \"\"\"\n if not isinstance(inputs, dict):\n _input_keys = set(self.input_keys)\n if self.memory is not None:\n # If there are multiple input keys, but some get set by memory so that\n # only one is not set, we can still figure out which key it is.\n _input_keys = _input_keys.difference(self.memory.memory_variables)\n if len(_input_keys) != 1:\n raise ValueError(\n f\"A single string input was passed in, but this chain expects \"\n f\"multiple inputs ({_input_keys}). 
When a chain expects \"\n f\"multiple inputs, please call it by passing in a dictionary, \"\n \"eg `chain({'foo': 1, 'bar': 2})`\"\n )\n inputs = {list(_input_keys)[0]: inputs}\n if self.memory is not None:\n external_context = self.memory.load_memory_variables(inputs)\n inputs = dict(inputs, **external_context)\n self._validate_inputs(inputs)\n return inputs\n\n @property\n def _run_output_key(self) -> str:\n if len(self.output_keys) != 1:\n raise ValueError(\n f\"`run` not supported when there is not exactly \"\n f\"one output key. Got {self.output_keys}.\"\n )\n return self.output_keys[0]\n\n def run(\n self,\n *args: Any,\n callbacks: Callbacks = None,\n tags: Optional[List[str]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n **kwargs: Any,\n ) -> Any:\n \"\"\"Convenience method for executing chain.\n\n The main difference between this method and `Chain.__call__` is that this\n method expects inputs to be passed directly in as positional arguments or\n keyword arguments, whereas `Chain.__call__` expects a single input dictionary\n with all the inputs\n\n Args:\n *args: If the chain expects a single input, it can be passed in as the\n sole positional argument.\n callbacks: Callbacks to use for this chain run. These will be called in\n addition to callbacks passed to the chain during construction, but only\n these runtime callbacks will propagate to calls to other objects.\n tags: List of string tags to pass to all callbacks. These will be passed in\n addition to tags passed to the chain during construction, but only\n these runtime tags will propagate to calls to other objects.\n **kwargs: If the chain expects multiple inputs, they can be passed in\n directly as keyword arguments.\n\n Returns:\n The chain output.\n\n Example:\n .. code-block:: python\n\n # Suppose we have a single-input chain that takes a 'question' string:\n chain.run(\"What's the temperature in Boise, Idaho?\")\n # -> \"The temperature in Boise is...\"\n\n # Suppose we have a multi-input chain that takes a 'question' string\n # and 'context' string:\n question = \"What's the temperature in Boise, Idaho?\"\n context = \"Weather report for Boise, Idaho on 07/03/23...\"\n chain.run(question=question, context=context)\n # -> \"The temperature in Boise is...\"\n \"\"\"\n # Run at start to make sure this is possible/defined\n _output_key = self._run_output_key\n\n if args and not kwargs:\n if len(args) != 1:\n raise ValueError(\"`run` supports only one positional argument.\")\n return self(args[0], callbacks=callbacks, tags=tags, metadata=metadata)[\n _output_key\n ]\n\n if kwargs and not args:\n return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[\n _output_key\n ]\n\n if not kwargs and not args:\n raise ValueError(\n \"`run` supported with either positional arguments or keyword arguments,\"\n \" but none were provided.\"\n )\n else:\n raise ValueError(\n f\"`run` supported with either positional arguments or keyword arguments\"\n f\" but not both. 
Got args: {args} and kwargs: {kwargs}.\"\n )\n\n async def arun(\n self,\n *args: Any,\n callbacks: Callbacks = None,\n tags: Optional[List[str]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n **kwargs: Any,\n ) -> Any:\n \"\"\"Convenience method for executing chain.\n\n The main difference between this method and `Chain.__call__` is that this\n method expects inputs to be passed directly in as positional arguments or\n keyword arguments, whereas `Chain.__call__` expects a single input dictionary\n with all the inputs\n\n\n Args:\n *args: If the chain expects a single input, it can be passed in as the\n sole positional argument.\n callbacks: Callbacks to use for this chain run. These will be called in\n addition to callbacks passed to the chain during construction, but only\n these runtime callbacks will propagate to calls to other objects.\n tags: List of string tags to pass to all callbacks. These will be passed in\n addition to tags passed to the chain during construction, but only\n these runtime tags will propagate to calls to other objects.\n **kwargs: If the chain expects multiple inputs, they can be passed in\n directly as keyword arguments.\n\n Returns:\n The chain output.\n\n Example:\n .. code-block:: python\n\n # Suppose we have a single-input chain that takes a 'question' string:\n await chain.arun(\"What's the temperature in Boise, Idaho?\")\n # -> \"The temperature in Boise is...\"\n\n # Suppose we have a multi-input chain that takes a 'question' string\n # and 'context' string:\n question = \"What's the temperature in Boise, Idaho?\"\n context = \"Weather report for Boise, Idaho on 07/03/23...\"\n await chain.arun(question=question, context=context)\n # -> \"The temperature in Boise is...\"\n \"\"\"\n if len(self.output_keys) != 1:\n raise ValueError(\n f\"`run` not supported when there is not exactly \"\n f\"one output key. Got {self.output_keys}.\"\n )\n elif args and not kwargs:\n if len(args) != 1:\n raise ValueError(\"`run` supports only one positional argument.\")\n return (\n await self.acall(\n args[0], callbacks=callbacks, tags=tags, metadata=metadata\n )\n )[self.output_keys[0]]\n\n if kwargs and not args:\n return (\n await self.acall(\n kwargs, callbacks=callbacks, tags=tags, metadata=metadata\n )\n )[self.output_keys[0]]\n\n raise ValueError(\n f\"`run` supported with either positional arguments or keyword arguments\"\n f\" but not both. Got args: {args} and kwargs: {kwargs}.\"\n )\n\n def dict(self, **kwargs: Any) -> Dict:\n \"\"\"Dictionary representation of chain.\n\n Expects `Chain._chain_type` property to be implemented and for memory to be\n null.\n\n Args:\n **kwargs: Keyword arguments passed to default `pydantic.BaseModel.dict`\n method.\n\n Returns:\n A dictionary representation of the chain.\n\n Example:\n .. code-block:: python\n\n chain.dict(exclude_unset=True)\n # -> {\"_type\": \"foo\", \"verbose\": False, ...}\n \"\"\"\n if self.memory is not None:\n raise ValueError(\"Saving of memory is not yet supported.\")\n _dict = super().dict(**kwargs)\n _dict[\"_type\"] = self._chain_type\n return _dict\n\n def save(self, file_path: Union[Path, str]) -> None:\n \"\"\"Save the chain.\n\n Expects `Chain._chain_type` property to be implemented and for memory to be\n null.\n\n Args:\n file_path: Path to file to save the chain to.\n\n Example:\n .. 
code-block:: python\n\n chain.save(file_path=\"path/chain.yaml\")\n \"\"\"\n # Convert file to Path object.\n if isinstance(file_path, str):\n save_path = Path(file_path)\n else:\n save_path = file_path\n\n directory_path = save_path.parent\n directory_path.mkdir(parents=True, exist_ok=True)\n\n # Fetch dictionary to save\n chain_dict = self.dict()\n\n if save_path.suffix == \".json\":\n with open(file_path, \"w\") as f:\n json.dump(chain_dict, f, indent=4)\n elif save_path.suffix == \".yaml\":\n with open(file_path, \"w\") as f:\n yaml.dump(chain_dict, f, default_flow_style=False)\n else:\n raise ValueError(f\"{save_path} must be json or yaml\")\n\n def apply(\n self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None\n ) -> List[Dict[str, str]]:\n \"\"\"Call the chain on all inputs in the list.\"\"\"\n return [self(inputs, callbacks=callbacks) for inputs in input_list]" }, { "identifier": "LLMChain", "path": "src/chains/llm_chain.py", "snippet": "class LLMChain(Chain):\n \"\"\"Chain to run queries against LLMs.\n\n Example:\n .. code-block:: python\n\n from langchain import LLMChain, OpenAI, PromptTemplate\n prompt_template = \"Tell me a {adjective} joke\"\n prompt = PromptTemplate(\n input_variables=[\"adjective\"], template=prompt_template\n )\n llm = LLMChain(llm=OpenAI(), prompt=prompt)\n \"\"\"\n\n @property\n def lc_serializable(self) -> bool:\n return True\n\n prompt: BasePromptTemplate\n \"\"\"Prompt object to use.\"\"\"\n llm: BaseLanguageModel\n \"\"\"Language model to call.\"\"\"\n output_key: str = \"text\" #: :meta private:\n output_parser: BaseLLMOutputParser = Field(default_factory=StrOutputParser)\n \"\"\"Output parser to use.\n Defaults to one that takes the most likely string but does not change it\n otherwise.\"\"\"\n return_final_only: bool = True\n \"\"\"Whether to return only the final parsed result. 
Defaults to True.\n If false, will return a bunch of extra information about the generation.\"\"\"\n llm_kwargs: dict = Field(default_factory=dict)\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n extra = Extra.forbid\n arbitrary_types_allowed = True\n\n @property\n def input_keys(self) -> List[str]:\n \"\"\"Will be whatever keys the prompt expects.\n\n :meta private:\n \"\"\"\n return self.prompt.input_variables\n\n @property\n def output_keys(self) -> List[str]:\n \"\"\"Will always return text key.\n\n :meta private:\n \"\"\"\n if self.return_final_only:\n return [self.output_key]\n else:\n return [self.output_key, \"full_generation\"]\n\n def _call(\n self,\n inputs: Dict[str, Any],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> Dict[str, str]:\n response = self.generate([inputs], run_manager=run_manager)\n return self.create_outputs(response)[0]\n\n def generate(\n self,\n input_list: List[Dict[str, Any]],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> LLMResult:\n \"\"\"Generate LLM result from inputs.\"\"\"\n prompts, stop = self.prep_prompts(input_list, run_manager=run_manager)\n return self.llm.generate_prompt(\n prompts,\n stop,\n callbacks=run_manager.get_child() if run_manager else None,\n **self.llm_kwargs,\n )\n\n async def agenerate(\n self,\n input_list: List[Dict[str, Any]],\n run_manager: Optional[AsyncCallbackManagerForChainRun] = None,\n ) -> LLMResult:\n \"\"\"Generate LLM result from inputs.\"\"\"\n prompts, stop = await self.aprep_prompts(input_list, run_manager=run_manager)\n return await self.llm.agenerate_prompt(\n prompts,\n stop,\n callbacks=run_manager.get_child() if run_manager else None,\n **self.llm_kwargs,\n )\n\n def prep_prompts(\n self,\n input_list: List[Dict[str, Any]],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> Tuple[List[PromptValue], Optional[List[str]]]:\n \"\"\"Prepare prompts from inputs.\"\"\"\n stop = None\n if len(input_list) == 0:\n return [], stop\n if \"stop\" in input_list[0]:\n stop = input_list[0][\"stop\"]\n prompts = []\n for inputs in input_list:\n selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}\n prompt = self.prompt.format_prompt(**selected_inputs)\n _colored_text = get_colored_text(prompt.to_string(), \"green\")\n _text = \"Prompt after formatting:\\n\" + _colored_text\n if run_manager:\n run_manager.on_text(_text, end=\"\\n\", verbose=self.verbose)\n if \"stop\" in inputs and inputs[\"stop\"] != stop:\n raise ValueError(\n \"If `stop` is present in any inputs, should be present in all.\"\n )\n prompts.append(prompt)\n return prompts, stop\n\n async def aprep_prompts(\n self,\n input_list: List[Dict[str, Any]],\n run_manager: Optional[AsyncCallbackManagerForChainRun] = None,\n ) -> Tuple[List[PromptValue], Optional[List[str]]]:\n \"\"\"Prepare prompts from inputs.\"\"\"\n stop = None\n if len(input_list) == 0:\n return [], stop\n if \"stop\" in input_list[0]:\n stop = input_list[0][\"stop\"]\n prompts = []\n for inputs in input_list:\n selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}\n prompt = self.prompt.format_prompt(**selected_inputs)\n _colored_text = get_colored_text(prompt.to_string(), \"green\")\n _text = \"Prompt after formatting:\\n\" + _colored_text\n if run_manager:\n await run_manager.on_text(_text, end=\"\\n\", verbose=self.verbose)\n if \"stop\" in inputs and inputs[\"stop\"] != stop:\n raise ValueError(\n \"If `stop` is present in any inputs, should be present in all.\"\n )\n 
prompts.append(prompt)\n return prompts, stop\n\n def apply(\n self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None\n ) -> List[Dict[str, str]]:\n \"\"\"Utilize the LLM generate method for speed gains.\"\"\"\n callback_manager = CallbackManager.configure(\n callbacks, self.callbacks, self.verbose\n )\n run_manager = callback_manager.on_chain_start(\n dumpd(self),\n {\"input_list\": input_list},\n )\n try:\n response = self.generate(input_list, run_manager=run_manager)\n except (KeyboardInterrupt, Exception) as e:\n run_manager.on_chain_error(e)\n raise e\n outputs = self.create_outputs(response)\n run_manager.on_chain_end({\"outputs\": outputs})\n return outputs\n\n async def aapply(\n self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None\n ) -> List[Dict[str, str]]:\n \"\"\"Utilize the LLM generate method for speed gains.\"\"\"\n callback_manager = AsyncCallbackManager.configure(\n callbacks, self.callbacks, self.verbose\n )\n run_manager = await callback_manager.on_chain_start(\n dumpd(self),\n {\"input_list\": input_list},\n )\n try:\n response = await self.agenerate(input_list, run_manager=run_manager)\n except (KeyboardInterrupt, Exception) as e:\n await run_manager.on_chain_error(e)\n raise e\n outputs = self.create_outputs(response)\n await run_manager.on_chain_end({\"outputs\": outputs})\n return outputs\n\n @property\n def _run_output_key(self) -> str:\n return self.output_key\n\n def create_outputs(self, llm_result: LLMResult) -> List[Dict[str, Any]]:\n \"\"\"Create outputs from response.\"\"\"\n result = [\n # Get the text of the top generated string.\n {\n self.output_key: self.output_parser.parse_result(generation),\n \"full_generation\": generation,\n }\n for generation in llm_result.generations\n ]\n if self.return_final_only:\n result = [{self.output_key: r[self.output_key]} for r in result]\n return result\n\n async def _acall(\n self,\n inputs: Dict[str, Any],\n run_manager: Optional[AsyncCallbackManagerForChainRun] = None,\n ) -> Dict[str, str]:\n response = await self.agenerate([inputs], run_manager=run_manager)\n return self.create_outputs(response)[0]\n\n def predict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:\n \"\"\"Format prompt with kwargs and pass to LLM.\n\n Args:\n callbacks: Callbacks to pass to LLMChain\n **kwargs: Keys to pass to prompt template.\n\n Returns:\n Completion from LLM.\n\n Example:\n .. code-block:: python\n\n completion = llm.predict(adjective=\"funny\")\n \"\"\"\n return self(kwargs, callbacks=callbacks)[self.output_key]\n\n async def apredict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:\n \"\"\"Format prompt with kwargs and pass to LLM.\n\n Args:\n callbacks: Callbacks to pass to LLMChain\n **kwargs: Keys to pass to prompt template.\n\n Returns:\n Completion from LLM.\n\n Example:\n .. 
code-block:: python\n\n completion = llm.predict(adjective=\"funny\")\n \"\"\"\n return (await self.acall(kwargs, callbacks=callbacks))[self.output_key]\n\n def predict_and_parse(\n self, callbacks: Callbacks = None, **kwargs: Any\n ) -> Union[str, List[str], Dict[str, Any]]:\n \"\"\"Call predict and then parse the results.\"\"\"\n warnings.warn(\n \"The predict_and_parse method is deprecated, \"\n \"instead pass an output parser directly to LLMChain.\"\n )\n result = self.predict(callbacks=callbacks, **kwargs)\n if self.prompt.output_parser is not None:\n return self.prompt.output_parser.parse(result)\n else:\n return result\n\n async def apredict_and_parse(\n self, callbacks: Callbacks = None, **kwargs: Any\n ) -> Union[str, List[str], Dict[str, str]]:\n \"\"\"Call apredict and then parse the results.\"\"\"\n warnings.warn(\n \"The apredict_and_parse method is deprecated, \"\n \"instead pass an output parser directly to LLMChain.\"\n )\n result = await self.apredict(callbacks=callbacks, **kwargs)\n if self.prompt.output_parser is not None:\n return self.prompt.output_parser.parse(result)\n else:\n return result\n\n def apply_and_parse(\n self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None\n ) -> Sequence[Union[str, List[str], Dict[str, str]]]:\n \"\"\"Call apply and then parse the results.\"\"\"\n warnings.warn(\n \"The apply_and_parse method is deprecated, \"\n \"instead pass an output parser directly to LLMChain.\"\n )\n result = self.apply(input_list, callbacks=callbacks)\n return self._parse_generation(result)\n\n def _parse_generation(\n self, generation: List[Dict[str, str]]\n ) -> Sequence[Union[str, List[str], Dict[str, str]]]:\n if self.prompt.output_parser is not None:\n return [\n self.prompt.output_parser.parse(res[self.output_key])\n for res in generation\n ]\n else:\n return generation\n\n async def aapply_and_parse(\n self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None\n ) -> Sequence[Union[str, List[str], Dict[str, str]]]:\n \"\"\"Call apply and then parse the results.\"\"\"\n warnings.warn(\n \"The aapply_and_parse method is deprecated, \"\n \"instead pass an output parser directly to LLMChain.\"\n )\n result = await self.aapply(input_list, callbacks=callbacks)\n return self._parse_generation(result)\n\n @property\n def _chain_type(self) -> str:\n return \"llm_chain\"\n\n @classmethod\n def from_string(cls, llm: BaseLanguageModel, template: str) -> LLMChain:\n \"\"\"Create LLMChain from LLM and template.\"\"\"\n prompt_template = PromptTemplate.from_template(template)\n return cls(llm=llm, prompt=prompt_template)" } ]
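The Chain base class documented in the context above is abstract: a concrete chain only has to declare input_keys, output_keys, and _call, and it inherits __call__, run, callback wiring, and memory handling. A minimal sketch of such a subclass, written against the upstream langchain.chains.base.Chain rather than the repo's vendored src.chains.chain (an assumption), with a hypothetical EchoChain used purely for illustration:

from typing import Any, Dict, List, Optional

from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain  # assumption: upstream counterpart of src.chains.chain


class EchoChain(Chain):
    """Toy chain with one input key and one output key."""

    @property
    def input_keys(self) -> List[str]:
        return ["question"]

    @property
    def output_keys(self) -> List[str]:
        return ["answer"]

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        # A real chain would call an LLM here; we just echo the question back.
        return {"answer": inputs["question"].upper()}


chain = EchoChain()
print(chain.run("what is 37593 * 67?"))   # single positional arg -> "WHAT IS 37593 * 67?"
print(chain({"question": "hi"}))          # __call__ -> {"question": "hi", "answer": "HI"}

Because EchoChain has exactly one output key, run() returns that value directly, while __call__ returns the merged input/output dict, matching the prep_outputs behavior shown above.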
import ast import math import re import warnings import numexpr from typing import Any, Dict, List, Optional from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, ) from langchain.prompts.prompt import PromptTemplate from langchain.pydantic_v1 import Extra, root_validator from langchain.schema import BasePromptTemplate from langchain.schema.language_model import BaseLanguageModel from src.chains.chain import Chain from src.chains.llm_chain import LLMChain
9,431
"""Chain that interprets a prompt and executes python code to do math.""" from __future__ import annotations # flake8: noqa _PROMPT_TEMPLATE = """Translate a math problem into a expression that can be executed using Python's numexpr library. Use the output of running this code to answer the question. You MUST follow the following guidelines: - Do not use "where(...)" expressions in your code since it is not supported. - Do not use "fmax(...)" expression in your code since it is not supported. Use "max(...)" instead. - Never introduce a variable. For instance "gazelle_max_speed * 1.4" is not allowed. Pick up a correct number from the given context. Question: ${{Question with math problem.}} ```text ${{single line mathematical expression that solves the problem}} ``` ...numexpr.evaluate(text)... ```output ${{Output of running the code}} ``` Answer: ${{Answer}} Begin. Question: What is 37593 * 67? ```text 37593 * 67 ``` ...numexpr.evaluate("37593 * 67")... ```output 2518731 ``` Answer: 2518731 Question: 37593^(1/5) ```text 37593**(1/5) ``` ...numexpr.evaluate("37593**(1/5)")... ```output 8.222831614237718 ``` Answer: 8.222831614237718 Question: {question} """ PROMPT = PromptTemplate( input_variables=["question"], template=_PROMPT_TEMPLATE, ) # helper functions to handle min and max functions def compute_function(match): func, values = match.groups() # Extract numbers and remove commas from between digits numbers = [float(re.sub(r"(?<=\d),(?=\d)", "", v)) for v in values.split(",")] # Compute the min or max based on the detected function result = min(numbers) if func == "min" else max(numbers) return str(result) class MaxTransformer(ast.NodeTransformer): def visit_Call(self, node): self.generic_visit(node) # Apply the transformation to child nodes first if isinstance(node.func, ast.Name) and node.func.id in ("max", "min"): if all(isinstance(arg, (ast.Num, ast.Constant)) for arg in node.args): # Calculate the max value # print(node.args) args_as_strings = (ast.unparse(arg) for arg in node.args) args_str = ", ".join(args_as_strings) print(args_str) if node.func.id == "min": value = min( arg.n if isinstance(arg, ast.Num) else arg.value for arg in node.args ) else: value = max( arg.n if isinstance(arg, ast.Num) else arg.value for arg in node.args ) # Replace the max call with the max value directly return ast.copy_location(ast.Constant(value=value), node) return node def replace_min_max_functions(expression): # Parse the expression into an AST parsed_expression = ast.parse(expression, mode="eval") # Transform the AST transformer = MaxTransformer() transformed_ast = transformer.visit(parsed_expression) # Fix the missing locations in the AST transformed_ast = ast.fix_missing_locations(transformed_ast) # Compile the transformed AST compiled_code = compile(transformed_ast, "<string>", "eval") # Evaluate the compiled code result = eval(compiled_code) return str(result)
"""Chain that interprets a prompt and executes python code to do math.""" from __future__ import annotations # flake8: noqa _PROMPT_TEMPLATE = """Translate a math problem into a expression that can be executed using Python's numexpr library. Use the output of running this code to answer the question. You MUST follow the following guidelines: - Do not use "where(...)" expressions in your code since it is not supported. - Do not use "fmax(...)" expression in your code since it is not supported. Use "max(...)" instead. - Never introduce a variable. For instance "gazelle_max_speed * 1.4" is not allowed. Pick up a correct number from the given context. Question: ${{Question with math problem.}} ```text ${{single line mathematical expression that solves the problem}} ``` ...numexpr.evaluate(text)... ```output ${{Output of running the code}} ``` Answer: ${{Answer}} Begin. Question: What is 37593 * 67? ```text 37593 * 67 ``` ...numexpr.evaluate("37593 * 67")... ```output 2518731 ``` Answer: 2518731 Question: 37593^(1/5) ```text 37593**(1/5) ``` ...numexpr.evaluate("37593**(1/5)")... ```output 8.222831614237718 ``` Answer: 8.222831614237718 Question: {question} """ PROMPT = PromptTemplate( input_variables=["question"], template=_PROMPT_TEMPLATE, ) # helper functions to handle min and max functions def compute_function(match): func, values = match.groups() # Extract numbers and remove commas from between digits numbers = [float(re.sub(r"(?<=\d),(?=\d)", "", v)) for v in values.split(",")] # Compute the min or max based on the detected function result = min(numbers) if func == "min" else max(numbers) return str(result) class MaxTransformer(ast.NodeTransformer): def visit_Call(self, node): self.generic_visit(node) # Apply the transformation to child nodes first if isinstance(node.func, ast.Name) and node.func.id in ("max", "min"): if all(isinstance(arg, (ast.Num, ast.Constant)) for arg in node.args): # Calculate the max value # print(node.args) args_as_strings = (ast.unparse(arg) for arg in node.args) args_str = ", ".join(args_as_strings) print(args_str) if node.func.id == "min": value = min( arg.n if isinstance(arg, ast.Num) else arg.value for arg in node.args ) else: value = max( arg.n if isinstance(arg, ast.Num) else arg.value for arg in node.args ) # Replace the max call with the max value directly return ast.copy_location(ast.Constant(value=value), node) return node def replace_min_max_functions(expression): # Parse the expression into an AST parsed_expression = ast.parse(expression, mode="eval") # Transform the AST transformer = MaxTransformer() transformed_ast = transformer.visit(parsed_expression) # Fix the missing locations in the AST transformed_ast = ast.fix_missing_locations(transformed_ast) # Compile the transformed AST compiled_code = compile(transformed_ast, "<string>", "eval") # Evaluate the compiled code result = eval(compiled_code) return str(result)
class LLMMathChain(Chain):
0
2023-12-06 21:12:54+00:00
12k
bytedance/ImageDream
extern/ldm_zero123/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2 * ddconfig[\"z_channels\"], 2 * embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels) == int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n self.log(\n \"aeloss\",\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n\n self.log(\n \"discloss\",\n discloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n 0,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n 1,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n self.log(\"val/rec_loss\", 
log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(\n list(self.encoder.parameters())\n + list(self.decoder.parameters())\n + list(self.quant_conv.parameters())\n + list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9),\n )\n opt_disc = torch.optim.Adam(\n self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)\n )\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x" }, { "identifier": "IdentityFirstStage", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "VQModelInterface", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "DDIMSampler", "path": "extern/ldm_zero123/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def to(self, device):\n \"\"\"Same as to in torch module\n Don't really underestand why this isn't a module in the first place\"\"\"\n for k, v in self.__dict__.items():\n if isinstance(v, torch.Tensor):\n new_v = getattr(self, k).to(device)\n setattr(self, k, new_v)\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(\n self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0.0, verbose=True\n ):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n 
num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose,\n )\n alphas_cumprod = self.model.alphas_cumprod\n assert (\n alphas_cumprod.shape[0] == self.ddpm_num_timesteps\n ), \"alphas have to be defined for each timestep\"\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer(\"betas\", to_torch(self.model.betas))\n self.register_buffer(\"alphas_cumprod\", to_torch(alphas_cumprod))\n self.register_buffer(\n \"alphas_cumprod_prev\", to_torch(self.model.alphas_cumprod_prev)\n )\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\n \"sqrt_alphas_cumprod\", to_torch(np.sqrt(alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_one_minus_alphas_cumprod\",\n to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),\n )\n self.register_buffer(\n \"log_one_minus_alphas_cumprod\", to_torch(np.log(1.0 - alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recip_alphas_cumprod\", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recipm1_alphas_cumprod\",\n to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),\n )\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose,\n )\n self.register_buffer(\"ddim_sigmas\", ddim_sigmas)\n self.register_buffer(\"ddim_alphas\", ddim_alphas)\n self.register_buffer(\"ddim_alphas_prev\", ddim_alphas_prev)\n self.register_buffer(\"ddim_sqrt_one_minus_alphas\", np.sqrt(1.0 - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev)\n / (1 - self.alphas_cumprod)\n * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)\n )\n self.register_buffer(\n \"ddim_sigmas_for_original_num_steps\", sigmas_for_original_sampling_steps\n )\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.0,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs,\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list):\n ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(\n f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\"\n )\n\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\"\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n t_start=-1,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = (\n self.ddpm_num_timesteps\n if ddim_use_original_steps\n else self.ddim_timesteps\n )\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = (\n int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]\n )\n - 1\n )\n timesteps = self.ddim_timesteps[:subset_end]\n\n timesteps = timesteps[:t_start]\n\n intermediates = {\"x_inter\": [img], \"pred_x0\": [img]}\n time_range = (\n reversed(range(0, timesteps))\n if ddim_use_original_steps\n else np.flip(timesteps)\n )\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"DDIM Sampler\", total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts\n ) # TODO: deterministic forward pass?\n img = img_orig * mask + (1.0 - mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n img, pred_x0 = outs\n if callback:\n img = callback(i, img, pred_x0)\n if img_callback:\n img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index 
== total_steps - 1:\n intermediates[\"x_inter\"].append(img)\n intermediates[\"pred_x0\"].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(\n self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n ):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat([unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n else:\n c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(\n self.model, e_t, x, t, c, **corrector_kwargs\n )\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = (\n self.model.alphas_cumprod_prev\n if use_original_steps\n else self.ddim_alphas_prev\n )\n sqrt_one_minus_alphas = (\n self.model.sqrt_one_minus_alphas_cumprod\n if use_original_steps\n else self.ddim_sqrt_one_minus_alphas\n )\n sigmas = (\n self.model.ddim_sigmas_for_original_num_steps\n if use_original_steps\n else self.ddim_sigmas\n )\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(\n (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device\n )\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n\n print(t, sqrt_one_minus_at, a_t)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n\n # direction pointing to x_t\n dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(\n self,\n x0,\n c,\n t_enc,\n use_original_steps=False,\n return_intermediates=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n ):\n num_reference_steps = (\n self.ddpm_num_timesteps\n if use_original_steps\n else self.ddim_timesteps.shape[0]\n )\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc=\"Encoding 
Image\"):\n t = torch.full(\n (x0.shape[0],), i, device=self.model.device, dtype=torch.long\n )\n if unconditional_guidance_scale == 1.0:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(\n torch.cat((x_next, x_next)),\n torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c)),\n ),\n 2,\n )\n noise_pred = e_t_uncond + unconditional_guidance_scale * (\n noise_pred - e_t_uncond\n )\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = (\n alphas_next[i].sqrt()\n * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt())\n * noise_pred\n )\n x_next = xt_weighted + weighted_noise_pred\n if (\n return_intermediates\n and i % (num_steps // return_intermediates) == 0\n and i < num_steps - 1\n ):\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n\n out = {\"x_encoded\": x_next, \"intermediate_steps\": inter_steps}\n if return_intermediates:\n out.update({\"intermediates\": intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise\n )\n\n @torch.no_grad()\n def decode(\n self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n ):\n timesteps = (\n np.arange(self.ddpm_num_timesteps)\n if use_original_steps\n else self.ddim_timesteps\n )\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"Decoding image\", total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full(\n (x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long\n )\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return x_dec" }, { "identifier": "CrossAttention", "path": "extern/ldm_zero123/modules/attention.py", "snippet": "class CrossAttention(nn.Module):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head**-0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)\n )\n\n def forward(self, x, context=None, mask=None):\n h = 
self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, \"b n (h d) -> (b h) n d\", h=h), (q, k, v))\n\n sim = einsum(\"b i d, b j d -> b i j\", q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, \"b ... -> b (...)\")\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, \"b j -> (b h) () j\", h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum(\"b i j, b j d -> b i d\", attn, v)\n out = rearrange(out, \"(b h) n d -> b n (h d)\", h=h)\n return self.to_out(out)" }, { "identifier": "extract_into_tensor", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "make_beta_schedule", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(\n schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3\n):\n if schedule == \"linear\":\n betas = (\n torch.linspace(\n linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64\n )\n ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64\n )\n elif schedule == \"sqrt\":\n betas = (\n torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n ** 0.5\n )\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "noise_like", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DiagonalGaussianDistribution", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(\n device=self.parameters.device\n )\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(\n device=self.parameters.device\n )\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.0])\n else:\n if other is None:\n return 0.5 * torch.sum(\n torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3],\n )\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var\n - 1.0\n - self.logvar\n + other.logvar,\n dim=[1, 2, 3],\n )\n\n def nll(self, sample, dims=[1, 2, 3]):\n if self.deterministic:\n return 
torch.Tensor([0.0])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims,\n )\n\n def mode(self):\n return self.mean" }, { "identifier": "normal_kl", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "LitEma", "path": "extern/ldm_zero123/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError(\"Decay must be between 0 and 1\")\n\n self.m_name2s_name = {}\n self.register_buffer(\"decay\", torch.tensor(decay, dtype=torch.float32))\n self.register_buffer(\n \"num_updates\",\n torch.tensor(0, dtype=torch.int)\n if use_num_upates\n else torch.tensor(-1, dtype=torch.int),\n )\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace(\".\", \"\")\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(\n one_minus_decay * (shadow_params[sname] - m_param[key])\n )\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. 
After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "count_params", "path": "extern/ldm_zero123/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "default", "path": "extern/ldm_zero123/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "exists", "path": "extern/ldm_zero123/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "instantiate_from_config", "path": "extern/ldm_zero123/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "isimage", "path": "extern/ldm_zero123/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "ismap", "path": "extern/ldm_zero123/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "log_txt_as_img", "path": "extern/ldm_zero123/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype(\"data/DejaVuSans.ttf\", size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(\n xc[bi][start : start + nc] for start in range(0, len(xc[bi]), nc)\n )\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "mean_flat", "path": "extern/ldm_zero123/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" } ]
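Several snippets above combine into the standard forward-diffusion step used throughout ddpm.py: make_beta_schedule produces the betas, their cumulative product gives alpha-bar, and extract_into_tensor broadcasts the per-timestep coefficients over an image batch (essentially what DDIMSampler.stochastic_encode does). A minimal standalone sketch, with the two helpers re-implemented inline and toy shapes chosen for illustration:

import torch


def linear_betas(n_timestep=1000, linear_start=1e-4, linear_end=2e-2):
    # the "linear" branch of make_beta_schedule: interpolate in sqrt space, then square
    return torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2


def extract(a, t, x_shape):
    # extract_into_tensor: gather one coefficient per sample, reshape for broadcasting
    b = t.shape[0]
    out = a.gather(-1, t)
    return out.reshape(b, *((1,) * (len(x_shape) - 1)))


betas = linear_betas().float()
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

x0 = torch.randn(4, 3, 32, 32)               # toy clean images/latents
t = torch.randint(0, betas.shape[0], (4,))   # one timestep per sample
noise = torch.randn_like(x0)

# q(x_t | x_0) = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise
x_t = (
    extract(torch.sqrt(alphas_cumprod), t, x0.shape) * x0
    + extract(torch.sqrt(1.0 - alphas_cumprod), t, x0.shape) * noise
)
print(x_t.shape)  # torch.Size([4, 3, 32, 32])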
import itertools
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
from contextlib import contextmanager, nullcontext
from functools import partial
from einops import rearrange, repeat
from omegaconf import ListConfig
from pytorch_lightning.utilities.rank_zero import rank_zero_only
from torch.optim.lr_scheduler import LambdaLR
from torchvision.utils import make_grid
from tqdm import tqdm

from extern.ldm_zero123.models.autoencoder import (
    AutoencoderKL,
    IdentityFirstStage,
    VQModelInterface,
)
from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler
from extern.ldm_zero123.modules.attention import CrossAttention
from extern.ldm_zero123.modules.diffusionmodules.util import (
    extract_into_tensor,
    make_beta_schedule,
    noise_like,
)
from extern.ldm_zero123.modules.distributions.distributions import (
    DiagonalGaussianDistribution,
    normal_kl,
)
from extern.ldm_zero123.modules.ema import LitEma
from extern.ldm_zero123.util import (
    count_params,
    default,
    exists,
    instantiate_from_config,
    isimage,
    ismap,
    log_txt_as_img,
    mean_flat,
)
10530
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema:
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema:
self.model_ema = LitEma(self.model)
10
2023-12-13 21:09:37+00:00
12k
TencentARC/MotionCtrl
app.py
[ { "identifier": "CAMERA_MOTION_MODE", "path": "gradio_utils/camera_utils.py", "snippet": "CAMERA_MOTION_MODE = [\"Basic Camera Poses\", \"Provided Complex Camera Poses\", \"Custom Camera Poses\"]" }, { "identifier": "process_camera", "path": "gradio_utils/camera_utils.py", "snippet": "def process_camera(camera_dict):\n # \"First A then B\", \"Both A and B\", \"Custom\"\n if camera_dict['complex'] is not None:\n with open(COMPLEX_CAMERA[camera_dict['complex']]) as f:\n RT = json.load(f) # [16, 12]\n RT = np.array(RT).reshape(-1, 3, 4)\n print(RT.shape)\n return RT\n\n\n motion_list = camera_dict['motion']\n mode = camera_dict['mode']\n speed = camera_dict['speed']\n print(len(motion_list))\n if len(motion_list) == 0:\n angle = np.array([0,0,0])\n T = np.array([0,0,0])\n RT = get_camera_motion(angle, T, speed, 16)\n\n\n elif len(motion_list) == 1:\n angle = np.array(CAMERA[motion_list[0]][\"angle\"])\n T = np.array(CAMERA[motion_list[0]][\"T\"])\n print(angle, T)\n RT = get_camera_motion(angle, T, speed, 16)\n \n \n \n elif len(motion_list) == 2:\n if mode == \"Customized Mode 1: First A then B\":\n angle = np.array(CAMERA[motion_list[0]][\"angle\"]) \n T = np.array(CAMERA[motion_list[0]][\"T\"]) \n RT_0 = get_camera_motion(angle, T, speed, 8)\n\n angle = np.array(CAMERA[motion_list[1]][\"angle\"]) \n T = np.array(CAMERA[motion_list[1]][\"T\"]) \n RT_1 = get_camera_motion(angle, T, speed, 8)\n\n RT = combine_camera_motion(RT_0, RT_1)\n\n elif mode == \"Customized Mode 2: Both A and B\":\n angle = np.array(CAMERA[motion_list[0]][\"angle\"]) + np.array(CAMERA[motion_list[1]][\"angle\"])\n T = np.array(CAMERA[motion_list[0]][\"T\"]) + np.array(CAMERA[motion_list[1]][\"T\"])\n RT = get_camera_motion(angle, T, speed, 16)\n\n\n # return RT.reshape(-1, 12)\n return RT" }, { "identifier": "OBJECT_MOTION_MODE", "path": "gradio_utils/traj_utils.py", "snippet": "OBJECT_MOTION_MODE = [\"Provided Trajectory\", \"Custom Trajectory\"]" }, { "identifier": "get_provided_traj", "path": "gradio_utils/traj_utils.py", "snippet": "def get_provided_traj(traj_name):\n traj = read_points(PROVIDED_TRAJS[traj_name])\n # xrange from 256 to 1024\n traj = [[int(1024*x/256), int(1024*y/256)] for x,y in traj]\n return traj" }, { "identifier": "process_points", "path": "gradio_utils/traj_utils.py", "snippet": "def process_points(points):\n frames = 16\n defualt_points = [[512,512]]*16\n\n if len(points) < 2:\n return defualt_points\n elif len(points) >= frames:\n skip = len(points)//frames\n return points[::skip][:15] + points[-1:]\n else:\n insert_num = frames - len(points)\n insert_num_dict = {}\n interval = len(points) - 1\n n = insert_num // interval\n m = insert_num % interval\n for i in range(interval):\n insert_num_dict[i] = n\n for i in range(m):\n insert_num_dict[i] += 1\n\n res = []\n for i in range(interval):\n insert_points = []\n x0,y0 = points[i]\n x1,y1 = points[i+1]\n\n delta_x = x1 - x0\n delta_y = y1 - y0\n for j in range(insert_num_dict[i]):\n x = x0 + (j+1)/(insert_num_dict[i]+1)*delta_x\n y = y0 + (j+1)/(insert_num_dict[i]+1)*delta_y\n insert_points.append([int(x), int(y)])\n\n res += points[i:i+1] + insert_points\n res += points[-1:]\n return res" }, { "identifier": "process_traj", "path": "gradio_utils/traj_utils.py", "snippet": "def process_traj(points, device='cpu'):\n xy_range = 1024\n points = process_points(points)\n points = [[int(256*x/xy_range), int(256*y/xy_range)] for x,y in points]\n \n optical_flow = get_flow(points)\n # optical_flow = torch.tensor(optical_flow).to(device)\n\n return 
optical_flow" }, { "identifier": "vis_camera", "path": "gradio_utils/utils.py", "snippet": "def vis_camera(RT_list, rescale_T=1):\n fig = go.Figure()\n showticklabels = True\n visible = True\n scene_bounds = 2\n base_radius = 2.5\n zoom_scale = 1.5\n fov_deg = 50.0\n \n edges = [(0, 1), (0, 2), (0, 3), (1, 2), (2, 3), (3, 1), (3, 4)] \n \n colors = px.colors.qualitative.Plotly\n \n cone_list = []\n n = len(RT_list)\n for i, RT in enumerate(RT_list):\n R = RT[:,:3]\n T = RT[:,-1]/rescale_T\n cone = calc_cam_cone_pts_3d(R, T, fov_deg)\n cone_list.append((cone, (i*1/n, \"green\"), f\"view_{i}\"))\n\n \n for (cone, clr, legend) in cone_list:\n for (i, edge) in enumerate(edges):\n (x1, x2) = (cone[edge[0], 0], cone[edge[1], 0])\n (y1, y2) = (cone[edge[0], 1], cone[edge[1], 1])\n (z1, z2) = (cone[edge[0], 2], cone[edge[1], 2])\n fig.add_trace(go.Scatter3d(\n x=[x1, x2], y=[y1, y2], z=[z1, z2], mode='lines',\n line=dict(color=clr, width=3),\n name=legend, showlegend=(i == 0))) \n fig.update_layout(\n height=500,\n autosize=True,\n # hovermode=False,\n margin=go.layout.Margin(l=0, r=0, b=0, t=0),\n \n showlegend=True,\n legend=dict(\n yanchor='bottom',\n y=0.01,\n xanchor='right',\n x=0.99,\n ),\n scene=dict(\n aspectmode='manual',\n aspectratio=dict(x=1, y=1, z=1.0),\n camera=dict(\n center=dict(x=0.0, y=0.0, z=0.0),\n up=dict(x=0.0, y=-1.0, z=0.0),\n eye=dict(x=scene_bounds/2, y=-scene_bounds/2, z=-scene_bounds/2),\n ),\n\n xaxis=dict(\n range=[-scene_bounds, scene_bounds],\n showticklabels=showticklabels,\n visible=visible,\n ),\n \n \n yaxis=dict(\n range=[-scene_bounds, scene_bounds],\n showticklabels=showticklabels,\n visible=visible,\n ),\n \n \n zaxis=dict(\n range=[-scene_bounds, scene_bounds],\n showticklabels=showticklabels,\n visible=visible,\n )\n ))\n return fig" }, { "identifier": "DDIMSampler", "path": "lvdm/models/samplers/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n self.counter = 0\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. 
/ alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n schedule_verbose=False,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n **kwargs\n ):\n \n # check condition bs\n if conditioning is not None:\n if isinstance(conditioning, dict):\n try:\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n except:\n cbs = conditioning[list(conditioning.keys())[0]][0].shape[0]\n\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=schedule_verbose)\n \n # make shape\n if len(shape) == 3:\n C, H, W = shape\n size = (batch_size, C, H, W)\n elif len(shape) == 4:\n C, T, H, W = shape\n size = (batch_size, C, T, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n \n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n verbose=verbose,\n **kwargs)\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, verbose=True,\n **kwargs):\n device = self.model.betas.device \n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n \n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n \n intermediates = {'x_inter': [img], 
'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n if verbose:\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n else:\n iterator = time_range\n\n clean_cond = kwargs.pop(\"clean_cond\", False)\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n # use mask to blend noised original latent (img_orig) & new sampled latent (img)\n if mask is not None:\n assert x0 is not None\n if clean_cond:\n img_orig = x0\n else:\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? <ddim inversion>\n img = img_orig * mask + (1. - mask) * img # keep original & modify use img\n \n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs)\n \n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n uc_type=None, conditional_guidance_scale_temporal=None, **kwargs):\n b, *_, device = *x.shape, x.device\n if x.dim() == 5:\n is_video = True\n else:\n is_video = False\n # f=open('/apdcephfs_cq2/share_1290939/yingqinghe/code/LVDM-private/cfg_range_s5noclamp.txt','a')\n # print(f't={t}, model input, min={torch.min(x)}, max={torch.max(x)}',file=f)\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c, **kwargs) # unet denoiser\n else:\n # with unconditional condition\n if isinstance(c, torch.Tensor):\n un_kwargs = kwargs.copy()\n if isinstance(unconditional_conditioning, dict):\n for uk, uv in unconditional_conditioning.items():\n if uk in un_kwargs:\n un_kwargs[uk] = uv\n unconditional_conditioning = unconditional_conditioning['uc']\n if 'cond_T' in kwargs and t < kwargs['cond_T']:\n if 'features_adapter' in kwargs:\n kwargs.pop('features_adapter')\n un_kwargs.pop('features_adapter')\n # kwargs['features_adapter'] = None\n # un_kwargs['features_adapter'] = None\n # if 'pose_emb' in kwargs:\n # kwargs.pop('pose_emb')\n # un_kwargs.pop('pose_emb')\n # kwargs['pose_emb'] = None\n # un_kwargs['pose_emb'] = None\n e_t = self.model.apply_model(x, t, c, **kwargs)\n # e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **kwargs)\n e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **un_kwargs)\n elif isinstance(c, dict):\n e_t = self.model.apply_model(x, t, c, **kwargs)\n e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **kwargs)\n else:\n raise NotImplementedError\n # text cfg\n if uc_type is None:\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n else:\n if uc_type == 'cfg_original':\n e_t = e_t + 
unconditional_guidance_scale * (e_t - e_t_uncond)\n elif uc_type == 'cfg_ours':\n e_t = e_t + unconditional_guidance_scale * (e_t_uncond - e_t)\n else:\n raise NotImplementedError\n # temporal guidance\n if conditional_guidance_scale_temporal is not None:\n e_t_temporal = self.model.apply_model(x, t, c, **kwargs)\n e_t_image = self.model.apply_model(x, t, c, no_temporal_attn=True, **kwargs)\n e_t = e_t + conditional_guidance_scale_temporal * (e_t_temporal - e_t_image)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n \n if is_video:\n size = (b, 1, 1, 1, 1)\n else:\n size = (b, 1, 1, 1)\n a_t = torch.full(size, alphas[index], device=device)\n a_prev = torch.full(size, alphas_prev[index], device=device)\n sigma_t = torch.full(size, sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(size, sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n # print(f't={t}, pred_x0, min={torch.min(pred_x0)}, max={torch.max(pred_x0)}',file=f)\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n # # norm pred_x0\n # p=2\n # s=()\n # pred_x0 = pred_x0 - torch.max(torch.abs(pred_x0))\n\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n \n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n\n return x_prev, pred_x0" }, { "identifier": "DEFAULT_NEGATIVE_PROMPT", "path": "main/evaluation/motionctrl_inference.py", "snippet": "DEFAULT_NEGATIVE_PROMPT = 'blur, haze, deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, '\\\n 'sketch, cartoon, drawing, anime, mutated hands and fingers, deformed, distorted, '\\\n 'disfigured, poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, '\\\n 'floating limbs, disconnected limbs, mutation, mutated, ugly, disgusting, amputation'\n RT = camera_poses[..., None]\n RT = None\ndef load_model_checkpoint(model, ckpt, adapter_ckpt=None):\ndef load_trajs(cond_dir, trajs):\ndef load_camera_pose(cond_dir, camera_poses):\ndef save_results(samples, filename, savedir, fps=10):\ndef motionctrl_sample(\n model, \n prompts, \n noise_shape,\n camera_poses=None, \n trajs=None,\n n_samples=1,\n unconditional_guidance_scale=1.0,\n unconditional_guidance_scale_temporal=None,\n ddim_steps=50,\n ddim_eta=1.,\n **kwargs):\ndef run_inference(args, gpu_num, gpu_no):\ndef save_images(samples, savedir):\ndef get_parser():" }, { "identifier": "instantiate_from_config", "path": "utils/utils.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return 
get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" } ]
import argparse
import os
import tempfile

import cv2
import gradio as gr
import imageio
import numpy as np
import torch
import torchvision
from functools import partial
from omegaconf import OmegaConf
from PIL import Image
from pytorch_lightning import seed_everything

from gradio_utils.camera_utils import CAMERA_MOTION_MODE, process_camera
from gradio_utils.traj_utils import (OBJECT_MOTION_MODE, get_provided_traj,
                                     process_points, process_traj)
from gradio_utils.utils import vis_camera
from lvdm.models.samplers.ddim import DDIMSampler
from main.evaluation.motionctrl_inference import (DEFAULT_NEGATIVE_PROMPT,
                                                  load_model_checkpoint,
                                                  post_prompt)
from utils.utils import instantiate_from_config
8274
global camera_dict if camera_dict['complex'] is not None: camera_dict['complex'] = None if camera_mode == CAMERA_MOTION_MODE[2] and len(camera_dict['motion']) <2: camera_dict['motion'].append(camera_motion) else: camera_dict['motion']=[camera_motion] return display_camera_info(camera_dict, camera_mode) def add_complex_camera_motion(camera_motion): global camera_dict camera_dict['complex']=camera_motion return display_camera_info(camera_dict) def change_camera_mode(combine_type, camera_mode): global camera_dict camera_dict['mode'] = combine_type return display_camera_info(camera_dict, camera_mode) def change_camera_speed(camera_speed): global camera_dict camera_dict['speed'] = camera_speed return display_camera_info(camera_dict) def reset_camera(): global camera_dict camera_dict = { "motion":[], "mode": "Customized Mode 1: First A then B", "speed": 1.0, "complex": None } return display_camera_info(camera_dict) def fn_traj_droplast(): global traj_list if traj_list: traj_list.pop() if traj_list: traj_str = [f"{traj}" for traj in traj_list] return ", ".join(traj_str) else: return "Click to specify trajectory" def fn_traj_reset(): global traj_list traj_list = [] return "Click to specify trajectory" ########################################### model_path='./checkpoints/motionctrl.pth' config_path='./configs/inference/config_both.yaml' config = OmegaConf.load(config_path) model_config = config.pop("model", OmegaConf.create()) model = instantiate_from_config(model_config) if torch.cuda.is_available(): model = model.cuda() model = load_model_checkpoint(model, model_path) model.eval() def model_run(prompts, infer_mode, seed, n_samples): global traj_list global camera_dict RT = process_camera(camera_dict).reshape(-1,12) traj_flow = process_traj(traj_list).transpose(3,0,1,2) print(prompts) print(RT.shape) print(traj_flow.shape) noise_shape = [1, 4, 16, 32, 32] unconditional_guidance_scale = 7.5 unconditional_guidance_scale_temporal = None # n_samples = 1 ddim_steps= 50 ddim_eta=1.0 cond_T=800 if n_samples < 1: n_samples = 1 if n_samples > 4: n_samples = 4 seed_everything(seed) if infer_mode == MODE[0]: camera_poses = RT camera_poses = torch.tensor(camera_poses).float() camera_poses = camera_poses.unsqueeze(0) trajs = None if torch.cuda.is_available(): camera_poses = camera_poses.cuda() elif infer_mode == MODE[1]: trajs = traj_flow trajs = torch.tensor(trajs).float() trajs = trajs.unsqueeze(0) camera_poses = None if torch.cuda.is_available(): trajs = trajs.cuda() else: camera_poses = RT trajs = traj_flow camera_poses = torch.tensor(camera_poses).float() trajs = torch.tensor(trajs).float() camera_poses = camera_poses.unsqueeze(0) trajs = trajs.unsqueeze(0) if torch.cuda.is_available(): camera_poses = camera_poses.cuda() trajs = trajs.cuda()
os.environ['KMP_DUPLICATE_LIB_OK']='True' SPACE_ID = os.environ.get('SPACE_ID', '') #### Description #### title = r"""<h1 align="center">MotionCtrl: A Unified and Flexible Motion Controller for Video Generation</h1>""" description = r""" <b>Official Gradio demo</b> for <a href='https://github.com/TencentARC/MotionCtrl' target='_blank'><b>MotionCtrl: A Unified and Flexible Motion Controller for Video Generation</b></a>.<br> 🔥 MotionCtrl is capable of independently and flexibly controling the camera motion and object motion of a generated video, with only a unified model.<br> 🤗 Try to control the motion of the generated videos yourself!<br> ❗❗❗ Please note that current version of **MotionCtrl** is deployed on **LVDM/VideoCrafter**. The versions that depolyed on **AnimateDiff** and **SVD** will be released soon.<br> """ article = r""" If MotionCtrl is helpful, please help to ⭐ the <a href='https://github.com/TencentARC/MotionCtrl' target='_blank'>Github Repo</a>. Thanks! [![GitHub Stars](https://img.shields.io/github/stars/TencentARC%2FMotionCtrl )](https://github.com/TencentARC/MotionCtrl) --- 📝 **Citation** <br> If our work is useful for your research, please consider citing: ```bibtex @inproceedings{wang2023motionctrl, title={MotionCtrl: A Unified and Flexible Motion Controller for Video Generation}, author={Wang, Zhouxia and Yuan, Ziyang and Wang, Xintao and Chen, Tianshui and Xia, Menghan and Luo, Ping and Shan, Yin}, booktitle={arXiv preprint arXiv:2312.03641}, year={2023} } ``` 📧 **Contact** <br> If you have any questions, please feel free to reach me out at <b>[email protected]</b>. """ css = """ .gradio-container {width: 85% !important} .gr-monochrome-group {border-radius: 5px !important; border: revert-layer !important; border-width: 2px !important; color: black !important;} span.svelte-s1r2yt {font-size: 17px !important; font-weight: bold !important; color: #d30f2f !important;} button {border-radius: 8px !important;} .add_button {background-color: #4CAF50 !important;} .remove_button {background-color: #f44336 !important;} .clear_button {background-color: gray !important;} .mask_button_group {gap: 10px !important;} .video {height: 300px !important;} .image {height: 300px !important;} .video .wrap.svelte-lcpz3o {display: flex !important; align-items: center !important; justify-content: center !important;} .video .wrap.svelte-lcpz3o > :first-child {height: 100% !important;} .margin_center {width: 50% !important; margin: auto !important;} .jc_center {justify-content: center !important;} """ T_base = [ [1.,0.,0.], ## W2C left [-1.,0.,0.], ## W2C right [0., 1., 0.], ## W2C up [0.,-1.,0.], ## W2C down [0.,0.,1.], ## W2C zoom out [0.,0.,-1.], ## W2C zoom in ] radius = 1 n = 16 # step = look_at = np.array([0, 0, 0.8]).reshape(3,1) # look_at = np.array([0, 0, 0.2]).reshape(3,1) T_list = [] base_R = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) res = [] res_forsave = [] T_range = 1.8 for i in range(0, 16): # theta = (1)*np.pi*i/n R = base_R[:,:3] T = np.array([0.,0.,1.]).reshape(3,1) * (i/n)*2 RT = np.concatenate([R,T], axis=1) res.append(RT) fig = vis_camera(res) # MODE = ["camera motion control", "object motion control", "camera + object motion control"] MODE = ["control camera poses", "control object trajectory", "control both camera and object motion"] BASE_MODEL = ['LVDM/VideoCrafter', 'AnimateDiff', 'SVD'] traj_list = [] camera_dict = { "motion":[], "mode": "Customized Mode 1: First A then B", # "First A then B", "Both A and B", "Custom" "speed": 1.0, "complex": None } def 
fn_vis_camera(info_mode): global camera_dict RT = process_camera(camera_dict) # [t, 3, 4] if camera_dict['complex'] is not None: # rescale T to [-2,2] for i in range(3): min_T = np.min(RT[:,i,-1]) max_T = np.max(RT[:,i,-1]) if min_T < -2 or max_T > 2: RT[:,i,-1] = RT[:,i,-1] - min_T RT[:,i,-1] = RT[:,i,-1] / (np.max(RT[:,:,-1]) + 1e-6) RT[:,i,-1] = RT[:,i,-1] * 4 RT[:,i,-1] = RT[:,i,-1] - 2 fig = vis_camera(RT) if info_mode == MODE[0]: vis_step3_prompt_generate = True vis_prompt = True vis_num_samples = True vis_seed = True vis_start = True vis_gen_video = True vis_object_mode = False vis_object_info = False else: vis_step3_prompt_generate = False vis_prompt = False vis_num_samples = False vis_seed = False vis_start = False vis_gen_video = False vis_object_mode = True vis_object_info = True return fig, \ gr.update(visible=vis_object_mode), \ gr.update(visible=vis_object_info), \ gr.update(visible=vis_step3_prompt_generate), \ gr.update(visible=vis_prompt), \ gr.update(visible=vis_num_samples), \ gr.update(visible=vis_seed), \ gr.update(visible=vis_start), \ gr.update(visible=vis_gen_video, value=None) def fn_vis_traj(): global traj_list xy_range = 1024 points = process_points(traj_list) imgs = [] for idx in range(16): bg_img = np.ones((1024, 1024, 3), dtype=np.uint8) * 255 for i in range(15): p = points[i] p1 = points[i+1] cv2.line(bg_img, p, p1, (255, 0, 0), 2) if i == idx: cv2.circle(bg_img, p, 2, (0, 255, 0), 20) if idx==(15): cv2.circle(bg_img, points[-1], 2, (0, 255, 0), 20) imgs.append(bg_img.astype(np.uint8)) # size = (512, 512) fps = 10 path = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name writer = imageio.get_writer(path, format='mp4', mode='I', fps=fps) for img in imgs: writer.append_data(img) writer.close() vis_step3_prompt_generate = True vis_prompt = True vis_num_samples = True vis_seed = True vis_start = True vis_gen_video = True return path, gr.update(visible=vis_step3_prompt_generate), \ gr.update(visible=vis_prompt), \ gr.update(visible=vis_num_samples), \ gr.update(visible=vis_seed), \ gr.update(visible=vis_start), \ gr.update(visible=vis_gen_video, value=None) def display_camera_info(camera_dict, camera_mode=None): if camera_dict['complex'] is not None: res = f"complex : {camera_dict['complex']}. " else: res = "" res += f"motion : {[_ for _ in camera_dict['motion']]}. " res += f"speed : {camera_dict['speed']}. " if camera_mode == CAMERA_MOTION_MODE[2]: res += f"mode : {camera_dict['mode']}. 
" return res def add_traj_point(evt: gr.SelectData, ): global traj_list traj_list.append(evt.index) traj_str = [f"{traj}" for traj in traj_list] return ", ".join(traj_str) def add_provided_traj(traj_name): global traj_list traj_list = get_provided_traj(traj_name) traj_str = [f"{traj}" for traj in traj_list] return ", ".join(traj_str) def add_camera_motion(camera_motion, camera_mode): global camera_dict if camera_dict['complex'] is not None: camera_dict['complex'] = None if camera_mode == CAMERA_MOTION_MODE[2] and len(camera_dict['motion']) <2: camera_dict['motion'].append(camera_motion) else: camera_dict['motion']=[camera_motion] return display_camera_info(camera_dict, camera_mode) def add_complex_camera_motion(camera_motion): global camera_dict camera_dict['complex']=camera_motion return display_camera_info(camera_dict) def change_camera_mode(combine_type, camera_mode): global camera_dict camera_dict['mode'] = combine_type return display_camera_info(camera_dict, camera_mode) def change_camera_speed(camera_speed): global camera_dict camera_dict['speed'] = camera_speed return display_camera_info(camera_dict) def reset_camera(): global camera_dict camera_dict = { "motion":[], "mode": "Customized Mode 1: First A then B", "speed": 1.0, "complex": None } return display_camera_info(camera_dict) def fn_traj_droplast(): global traj_list if traj_list: traj_list.pop() if traj_list: traj_str = [f"{traj}" for traj in traj_list] return ", ".join(traj_str) else: return "Click to specify trajectory" def fn_traj_reset(): global traj_list traj_list = [] return "Click to specify trajectory" ########################################### model_path='./checkpoints/motionctrl.pth' config_path='./configs/inference/config_both.yaml' config = OmegaConf.load(config_path) model_config = config.pop("model", OmegaConf.create()) model = instantiate_from_config(model_config) if torch.cuda.is_available(): model = model.cuda() model = load_model_checkpoint(model, model_path) model.eval() def model_run(prompts, infer_mode, seed, n_samples): global traj_list global camera_dict RT = process_camera(camera_dict).reshape(-1,12) traj_flow = process_traj(traj_list).transpose(3,0,1,2) print(prompts) print(RT.shape) print(traj_flow.shape) noise_shape = [1, 4, 16, 32, 32] unconditional_guidance_scale = 7.5 unconditional_guidance_scale_temporal = None # n_samples = 1 ddim_steps= 50 ddim_eta=1.0 cond_T=800 if n_samples < 1: n_samples = 1 if n_samples > 4: n_samples = 4 seed_everything(seed) if infer_mode == MODE[0]: camera_poses = RT camera_poses = torch.tensor(camera_poses).float() camera_poses = camera_poses.unsqueeze(0) trajs = None if torch.cuda.is_available(): camera_poses = camera_poses.cuda() elif infer_mode == MODE[1]: trajs = traj_flow trajs = torch.tensor(trajs).float() trajs = trajs.unsqueeze(0) camera_poses = None if torch.cuda.is_available(): trajs = trajs.cuda() else: camera_poses = RT trajs = traj_flow camera_poses = torch.tensor(camera_poses).float() trajs = torch.tensor(trajs).float() camera_poses = camera_poses.unsqueeze(0) trajs = trajs.unsqueeze(0) if torch.cuda.is_available(): camera_poses = camera_poses.cuda() trajs = trajs.cuda()
ddim_sampler = DDIMSampler(model)
7
2023-12-06 07:27:45+00:00
12k
TianxingWu/FreeInit
examples/AnimateDiff/animatediff/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "examples/AnimateDiff/animatediff/models/unet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0, \n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n \n use_inflated_groupnorm=False,\n \n # Additional\n use_motion_module = False,\n motion_module_resolutions = ( 1,2,4,8 ),\n motion_module_mid_block = False,\n motion_module_decoder_only = False,\n motion_module_type = None,\n motion_module_kwargs = {},\n unet_use_cross_frame_attention = None,\n unet_use_temporal_attention = None,\n ):\n super().__init__()\n \n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n 
downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n \n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n \n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n \n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n if 
use_inflated_groupnorm:\n self.conv_norm_out = InflatedGroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n else:\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # pre-process\n sample = self.conv_in(sample)\n\n # down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states)\n\n down_block_res_samples += res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u 
= model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n \n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n \n return model" }, { "identifier": "get_freq_filter", "path": "examples/AnimateDiff/animatediff/utils/freeinit_utils.py", "snippet": "def get_freq_filter(shape, device, filter_type, n, d_s, d_t):\n \"\"\"\n Form the frequency filter for noise reinitialization.\n\n Args:\n shape: shape of latent (B, C, T, H, W)\n filter_type: type of the freq filter\n n: (only for butterworth) order of the filter, larger n ~ ideal, smaller n ~ gaussian\n d_s: normalized stop frequency for spatial dimensions (0.0-1.0)\n d_t: normalized stop frequency for temporal dimension (0.0-1.0)\n \"\"\"\n if filter_type == \"gaussian\":\n return gaussian_low_pass_filter(shape=shape, d_s=d_s, d_t=d_t).to(device)\n elif filter_type == \"ideal\":\n return ideal_low_pass_filter(shape=shape, d_s=d_s, d_t=d_t).to(device)\n elif filter_type == \"box\":\n return box_low_pass_filter(shape=shape, d_s=d_s, d_t=d_t).to(device)\n elif filter_type == \"butterworth\":\n return butterworth_low_pass_filter(shape=shape, n=n, d_s=d_s, d_t=d_t).to(device)\n else:\n raise NotImplementedError" }, { "identifier": "freq_mix_3d", "path": "examples/AnimateDiff/animatediff/utils/freeinit_utils.py", "snippet": "def freq_mix_3d(x, noise, LPF):\n \"\"\"\n Noise reinitialization.\n\n Args:\n x: diffused latent\n noise: randomly sampled noise\n LPF: low pass filter\n \"\"\"\n # FFT\n x_freq = fft.fftn(x, dim=(-3, -2, -1))\n x_freq = fft.fftshift(x_freq, dim=(-3, -2, -1))\n noise_freq = fft.fftn(noise, dim=(-3, -2, -1))\n noise_freq = fft.fftshift(noise_freq, dim=(-3, -2, -1))\n\n # frequency mix\n HPF = 1 - LPF\n x_freq_low = x_freq * LPF\n noise_freq_high = noise_freq * HPF\n x_freq_mixed = x_freq_low + noise_freq_high # mix in freq domain\n\n # IFFT\n x_freq_mixed = fft.ifftshift(x_freq_mixed, dim=(-3, -2, -1))\n x_mixed = fft.ifftn(x_freq_mixed, dim=(-3, -2, -1)).real\n\n return x_mixed" }, { "identifier": "save_videos_grid", "path": "examples/AnimateDiff/animatediff/utils/util.py", "snippet": "def save_videos_grid(videos: torch.Tensor, path: str, rescale=False, n_rows=6, fps=8):\n videos = rearrange(videos, \"b c t h w -> t b c h w\")\n outputs = []\n for x in videos:\n x = torchvision.utils.make_grid(x, nrow=n_rows)\n x = x.transpose(0, 1).transpose(1, 2).squeeze(-1)\n if rescale:\n x = (x + 1.0) / 2.0 # -1,1 -> 0,1\n x = (x * 255).numpy().astype(np.uint8)\n outputs.append(x)\n\n os.makedirs(os.path.dirname(path), exist_ok=True)\n imageio.mimsave(path, outputs, fps=fps)" } ]
import inspect
import numpy as np
import torch
import os
from typing import Callable, List, Optional, Union
from dataclasses import dataclass
from tqdm import tqdm
from diffusers.utils import is_accelerate_available
from packaging import version
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL
from diffusers.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from diffusers.utils import deprecate, logging, BaseOutput
from einops import rearrange
from ..models.unet import UNet3DConditionModel
from ..utils.freeinit_utils import (
    get_freq_filter,
    freq_mix_3d,
)
from ..utils.util import save_videos_grid
from accelerate import cpu_offload
8736
width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. Raise error if not correct # import pdb # pdb.set_trace() self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # Prepare latent variables num_channels_latents = self.unet.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, video_length, height, width, text_embeddings.dtype, device, generator, latents, ) latents_dtype = latents.dtype # Prepare extra step kwargs. extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # Sampling with FreeInit. for iter in range(num_iters): # FreeInit ------------------------------------------------------------------ if iter == 0: initial_noise = latents.detach().clone() else: # 1. DDPM Forward with initial noise, get noisy latents z_T # if use_fast_sampling: # current_diffuse_timestep = self.scheduler.config.num_train_timesteps / num_iters * (iter + 1) - 1 # else: # current_diffuse_timestep = self.scheduler.config.num_train_timesteps - 1 current_diffuse_timestep = self.scheduler.config.num_train_timesteps - 1 # diffuse to t=999 noise level diffuse_timesteps = torch.full((batch_size,),int(current_diffuse_timestep)) diffuse_timesteps = diffuse_timesteps.long() z_T = self.scheduler.add_noise( original_samples=latents.to(device), noise=initial_noise.to(device), timesteps=diffuse_timesteps.to(device) ) # 2. create random noise z_rand for high-frequency z_rand = torch.randn((batch_size * num_videos_per_prompt, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor), device=device) # 3. 
Roise Reinitialization latents = freq_mix_3d(z_T.to(dtype=torch.float32), z_rand, LPF=self.freq_filter) latents = latents.to(latents_dtype) # Coarse-to-Fine Sampling for Fast Inference (can lead to sub-optimal results) if use_fast_sampling: current_num_inference_steps= int(num_inference_steps / num_iters * (iter + 1)) self.scheduler.set_timesteps(current_num_inference_steps, device=device) timesteps = self.scheduler.timesteps # -------------------------------------------------------------------------- # Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: # if use_fast_sampling: # # Coarse-to-Fine Sampling for Fast Inference # current_num_inference_steps= int(num_inference_steps / num_iters * (iter + 1)) # current_timesteps = timesteps[:current_num_inference_steps] # else: current_timesteps = timesteps for i, t in enumerate(current_timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample.to(dtype=latents_dtype) # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if i == len(current_timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, latents) # save intermediate results if save_intermediate: # Post-processing video = self.decode_latents(latents) video = torch.from_numpy(video) os.makedirs(save_dir, exist_ok=True)
# Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] @dataclass class AnimationFreeInitPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] orig_videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. 
If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if is_accelerate_available(): else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def decode_latents(self, latents): video_length = latents.shape[2] latents = 1 / 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0])): video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. 
Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): shape = shape # shape = (1,) + shape[1:] latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, **kwargs, ): # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # Prepare latent variables num_channels_latents = self.unet.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, video_length, height, width, text_embeddings.dtype, device, generator, latents, ) latents_dtype = latents.dtype # Prepare extra step kwargs. 
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample.to(dtype=latents_dtype) # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, latents) # Post-processing video = self.decode_latents(latents) # Convert to tensor if output_type == "tensor": video = torch.from_numpy(video) if not return_dict: return video return AnimationPipelineOutput(videos=video) class AnimationFreeInitPipeline(AnimationPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ): super().__init__(vae, text_encoder, tokenizer, unet, scheduler) self.freq_filter = None @torch.no_grad() def init_filter(self, video_length, height, width, filter_params): # initialize frequency filter for noise reinitialization batch_size = 1 num_channels_latents = self.unet.in_channels filter_shape = [ batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor ] # self.freq_filter = get_freq_filter(filter_shape, device=self._execution_device, params=filter_params) self.freq_filter = get_freq_filter( filter_shape, device=self._execution_device, filter_type=filter_params.method, n=filter_params.n if filter_params.method=="butterworth" else None, d_s=filter_params.d_s, d_t=filter_params.d_t ) @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, # freeinit args num_iters: int = 5, use_fast_sampling: bool = False, save_intermediate: bool = False, return_orig: bool = False, save_dir: str = None, save_name: str = None, use_fp16: bool = False, **kwargs ): if use_fp16: print('Warning: using half percision for inferencing!') self.vae.to(dtype=torch.float16) self.unet.to(dtype=torch.float16) self.text_encoder.to(dtype=torch.float16) # 
Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. Raise error if not correct # import pdb # pdb.set_trace() self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # Prepare latent variables num_channels_latents = self.unet.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, video_length, height, width, text_embeddings.dtype, device, generator, latents, ) latents_dtype = latents.dtype # Prepare extra step kwargs. extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # Sampling with FreeInit. for iter in range(num_iters): # FreeInit ------------------------------------------------------------------ if iter == 0: initial_noise = latents.detach().clone() else: # 1. DDPM Forward with initial noise, get noisy latents z_T # if use_fast_sampling: # current_diffuse_timestep = self.scheduler.config.num_train_timesteps / num_iters * (iter + 1) - 1 # else: # current_diffuse_timestep = self.scheduler.config.num_train_timesteps - 1 current_diffuse_timestep = self.scheduler.config.num_train_timesteps - 1 # diffuse to t=999 noise level diffuse_timesteps = torch.full((batch_size,),int(current_diffuse_timestep)) diffuse_timesteps = diffuse_timesteps.long() z_T = self.scheduler.add_noise( original_samples=latents.to(device), noise=initial_noise.to(device), timesteps=diffuse_timesteps.to(device) ) # 2. create random noise z_rand for high-frequency z_rand = torch.randn((batch_size * num_videos_per_prompt, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor), device=device) # 3. 
Roise Reinitialization latents = freq_mix_3d(z_T.to(dtype=torch.float32), z_rand, LPF=self.freq_filter) latents = latents.to(latents_dtype) # Coarse-to-Fine Sampling for Fast Inference (can lead to sub-optimal results) if use_fast_sampling: current_num_inference_steps= int(num_inference_steps / num_iters * (iter + 1)) self.scheduler.set_timesteps(current_num_inference_steps, device=device) timesteps = self.scheduler.timesteps # -------------------------------------------------------------------------- # Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: # if use_fast_sampling: # # Coarse-to-Fine Sampling for Fast Inference # current_num_inference_steps= int(num_inference_steps / num_iters * (iter + 1)) # current_timesteps = timesteps[:current_num_inference_steps] # else: current_timesteps = timesteps for i, t in enumerate(current_timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample.to(dtype=latents_dtype) # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if i == len(current_timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, latents) # save intermediate results if save_intermediate: # Post-processing video = self.decode_latents(latents) video = torch.from_numpy(video) os.makedirs(save_dir, exist_ok=True)
save_videos_grid(video, f"{save_dir}/{save_name}_iter{iter}.gif")
3
2023-12-12 13:11:24+00:00
12k
allenai/unified-io-2
t5x/trainer_test.py
[ { "identifier": "metrics", "path": "t5x/metrics.py", "snippet": "def _check_param(value, *, ndim=None, dtype=jnp.float32):\n def from_model_output(cls, values: Scalar, **_) -> clu_metrics.Metric:\n def merge(self, other: \"Sum\") -> \"Sum\":\n def compute(self) -> Scalar:\n def replace_steps(self, steps) -> \"Step\":\n def compute(self) -> Scalar:\n def from_model_output(cls,\n values: Scalar,\n steps: Optional[int] = 1,\n **_) -> clu_metrics.Metric:\n def merge(self, other: \"AveragePerStep\") -> \"AveragePerStep\":\n def compute(self) -> Scalar:\n def from_model_output(cls,\n values: Any,\n **_) -> clu_metrics.Metric:\n def merge(self, other: \"CurrentOnStep\") -> \"CurrentOnStep\":\n def compute(self) -> Scalar:\n def merge(self, other: \"Time\") -> \"Time\":\n def compute(self) -> Scalar:\n def replace_duration(self, duration: Scalar) -> \"Time\":\n def from_model_output(cls, numerator: float, **_) -> clu_metrics.Metric:\n def merge(self, other: \"TimeRate\") -> \"TimeRate\":\n def compute(self) -> Scalar:\n def replace_duration(self, duration: Scalar) -> \"Time\":\n def from_model_output(cls,\n steps: Optional[int] = 1,\n **_) -> clu_metrics.Metric:\n def merge(self, other: \"StepsPerTime\") -> \"StepsPerTime\":\n def compute(self) -> Scalar:\ndef is_metric_obj(obj):\ndef is_time_metric(obj):\ndef create_metrics_dict(float_metrics_dict):\ndef shape_obj_to_defined_obj(obj: clu_metrics.Metric):\n def class_attr_shape(a):\ndef set_time_metrics_duration(metrics, duration):\n def fn(o):\ndef set_step_metrics_num_steps(metrics, num_steps):\n def fn(o):\nclass Sum(clu_metrics.Metric):\nclass Step(clu_metrics.Metric):\nclass AveragePerStep(Step):\nclass CurrentOnStep(clu_metrics.Metric):\nclass Time(clu_metrics.Metric):\nclass TimeRate(Time):\nclass StepsPerTime(Step, Time):" }, { "identifier": "models", "path": "t5x/models.py", "snippet": "class TokensIdsToLogitsCallable(typing_extensions.Protocol):\nclass DecodeFnCallable(typing_extensions.Protocol):\nclass BaseModel(abc.ABC):\nclass BaseTransformerModel(BaseModel):\nclass EncoderDecoderModel(BaseTransformerModel):\nclass DecoderOnlyModel(BaseTransformerModel):\n def __call__(\n self, decoding_state: decoding.DecodingState\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def __call__(self, *, inputs: jnp.ndarray, cache: Mapping[str, jnp.ndarray],\n tokens_to_logits: TokensIdsToLogitsCallable, eos_id: int,\n num_decodes: int, decode_rng: Optional[jax.random.KeyArray],\n cache_offset: int, **kwargs) -> Tuple[jnp.ndarray, jnp.ndarray]:\n def __init__(self, optimizer_def: optimizers.OptimizerDefType):\n def loss_fn(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray],\n ) -> Tuple[jnp.ndarray, MetricsMap]:\n def eval_fn(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n ) -> Tuple[jnp.ndarray, MetricsMap]:\n def predict_batch(self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n rng: Optional[jax.random.KeyArray] = None) -> jnp.ndarray:\n def predict_batch_with_aux(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n rng: Optional[jax.random.KeyArray] = None,\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def score_batch(self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n return_intermediates: bool = False) -> jnp.ndarray:\n def get_initial_variables(\n self,\n rng: jax.random.KeyArray,\n input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str, jnp.dtype]] = None\n ) -> 
flax_scope.FrozenVariableDict:\n def __init__(\n self,\n module: nn.Module,\n input_vocabulary: seqio.Vocabulary,\n output_vocabulary: seqio.Vocabulary,\n optimizer_def: optimizers.OptimizerDefType,\n decode_fn: Optional[DecodeFnCallable] = None,\n label_smoothing: float = 0.0,\n z_loss: float = 0.0,\n loss_normalizing_factor: Optional[Union[\n float, int, str, losses.SpecialLossNormalizingFactor]] = None,\n ):\n def input_vocabulary(self):\n def output_vocabulary(self):\n def decode_fn(self):\n def _compute_logits(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray] = None) -> jnp.ndarray:\n def loss_fn(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray],\n ) -> Tuple[jnp.ndarray, MetricsMap]:\n def _compute_metrics(\n self,\n logits: jnp.ndarray,\n targets: jnp.ndarray,\n mask: jnp.ndarray,\n loss: jnp.ndarray,\n z_loss: Optional[jnp.ndarray] = None,\n segment_ids: Optional[Mapping[str, jnp.ndarray]] = None,\n ) -> MetricsMap:\n def __init__(\n self,\n module: nn.Module,\n input_vocabulary: seqio.Vocabulary,\n output_vocabulary: seqio.Vocabulary,\n optimizer_def: optimizers.OptimizerDefType,\n decode_fn: DecodeFnCallable = decoding.beam_search,\n feature_converter_cls: Optional[Callable[...,\n seqio.FeatureConverter]] = None,\n label_smoothing: float = 0.0,\n z_loss: float = 0.0,\n loss_normalizing_factor: Optional[float] = None,\n ):\n def get_initial_variables(\n self,\n rng: jax.random.KeyArray,\n input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str, jnp.dtype]] = None\n ) -> flax_scope.FrozenVariableDict:\n def _compute_logits(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray] = None,\n mutable: flax_scope.CollectionFilter = False,\n other_variables: Optional[PyTreeDef] = None,\n ) -> Union[jnp.ndarray, Tuple[jnp.ndarray, flax_scope.FrozenVariableDict]]:\n def _compute_logits_from_slice(\n self, decoding_state: decoding.DecodingState, params: PyTreeDef,\n encoded_inputs: jnp.ndarray, raw_inputs: jnp.ndarray,\n max_decode_length: int) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def predict_batch_with_aux(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n rng: Optional[jax.random.KeyArray] = None,\n decoder_params: Optional[MutableMapping[str, Any]] = None,\n return_all_decodes: bool = False,\n num_decodes: int = 1,\n prompt_with_targets: bool = False\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def score_batch(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n return_intermediates: bool = False,\n ) -> Union[jnp.ndarray, Tuple[jnp.ndarray, Mapping[str, Any]]]:\n def __init__(\n self,\n module: nn.Module,\n vocabulary: seqio.Vocabulary,\n optimizer_def: optimizers.OptimizerDefType,\n decode_fn: DecodeFnCallable = decoding.temperature_sample,\n inputs_bidirectional_attention: bool = False,\n feature_converter_cls: Optional[Callable[...,\n seqio.FeatureConverter]] = None,\n label_smoothing: float = 0.0,\n z_loss: float = 0.0,\n loss_normalizing_factor: Optional[float] = None,\n ):\n def get_initial_variables(\n self,\n rng: jax.random.KeyArray,\n input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str, jnp.dtype]] = None\n ) -> flax_scope.FrozenVariableDict:\n def _get_decoder_causal_attention(self, batch):\n def _compute_logits(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: 
Optional[jax.random.KeyArray] = None,\n mutable: flax_scope.CollectionFilter = False) -> jnp.ndarray:\n def _compute_logits_from_slice(\n self,\n decoding_state: decoding.DecodingState,\n params: PyTreeDef,\n max_decode_length: int,\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def score_batch(self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n return_intermediates: bool = False) -> jnp.ndarray:\n def _compute_kv_cache(\n self,\n params: PyTreeDef,\n inputs: jnp.ndarray,\n inputs_lengths: jnp.ndarray,\n decoder_causal_attention: jnp.ndarray,\n ) -> PyTreeDef:\n def predict_batch_with_aux(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n rng: Optional[jax.random.KeyArray] = None,\n *,\n return_all_decodes: bool = False,\n num_decodes: int = 1,\n decoder_params: Optional[MutableMapping[str, Any]] = None,\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\ndef remove_prefix(sequence: jnp.ndarray,\n prefix_length: jnp.ndarray) -> jnp.ndarray:\ndef compute_weighted_accuracy(\n logits: jnp.ndarray,\n targets: jnp.ndarray,\n weights: Optional[jnp.ndarray] = None) -> Tuple[jnp.ndarray, jnp.ndarray]:\ndef compute_metrics(logits: jnp.ndarray, targets: jnp.ndarray,\n weights: jnp.ndarray, loss: jnp.ndarray,\n weight_sum: jnp.ndarray,\n additional_metrics: MetricsMap) -> MetricsMap:\ndef compute_base_metrics(\n logits: jnp.ndarray,\n targets: jnp.ndarray,\n mask: jnp.ndarray,\n loss: jnp.ndarray,\n z_loss: Optional[jnp.ndarray] = None,\n segment_ids: Optional[Mapping[str, jnp.ndarray]] = None,\n) -> MetricsMap:\ndef get_input_vocabulary(model: BaseTransformerModel) -> seqio.Vocabulary:\ndef get_output_vocabulary(model: BaseTransformerModel) -> seqio.Vocabulary:\n FEATURE_CONVERTER_CLS: Callable[..., seqio.FeatureConverter]\n FEATURE_CONVERTER_CLS = seqio.EncDecFeatureConverter\n FEATURE_CONVERTER_CLS = seqio.DecoderFeatureConverter" }, { "identifier": "optimizers", "path": "t5x/optimizers.py", "snippet": "class OptimizerState:\nclass OptimizerDef:\nclass Optimizer(struct.PyTreeNode):\nclass OptaxStatePartitionRules:\nclass _OptaxWrapperHyperParams:\nclass OptaxWrapper(OptimizerDef):\nclass _Marker:\nclass MultiOptimizer(OptimizerDef):\n def __init__(self, hyper_params):\n def apply_gradient(self, hyper_params, params, state, grads):\n def init_state(self, params):\n def update_hyper_params(self, **hyper_param_overrides):\n def create(self, target):\n def state_dict(self, target, state):\n def restore_state(self, opt_target, opt_state, state_dict):\n def apply_gradient(self, grads, **hyper_param_overrides):\n def state_dict(self):\n def restore_state(self, state):\n def _is_optax_state(cls, x):\n def derive_optax_logical_axes(cls, optax_state, params_axes):\n def derive_fn(x):\n def __init__(self, optax_optimizer: optax.GradientTransformation):\n def init_state(self, params):\n def apply_gradient(self, hyper_params, params, state, grads):\n def derive_logical_axes(self, optimizer, param_logical_axes):\n def state_dict(self, target, state):\n def restore_state(self, opt_target, opt_state, state_dict):\ndef wrap_optax_optimizer(optax_optimizer):\n def wrapped_optimizer(*args, **kwargs) -> OptimizerDef:\ndef chain(\n transformations: Sequence[optax.GradientTransformation]\n) -> optax.GradientTransformation:\n def __init__(self):\ndef _tree_of_paths(tree):\ndef _subtree_from_traversal(traversal, tree):\ndef _update_subtree_of_traversal(traversal, tree, update):\n def __init__(\n self, traversals_and_optimizers: Sequence[Tuple[traverse_util.Traversal,\n 
OptimizerDef]]):\n def init_state(self, params):\n def apply_gradient(self, hyper_params, params, state, grads):\n def update_hyper_params(self, **hyper_param_overrides):\n def set_param_axes(self, param_logical_axes):\n def derive_logical_axes(self, optimizer, param_logical_axes):\n _RULES = {\n\n # Leaf Optax States:\n optax.AddNoiseState:\n lambda state, params_axes: optax.AddNoiseState(\n count=None, rng_key=None),\n optax.DifferentiallyPrivateAggregateState:\n lambda state, params_axes: optax.DifferentiallyPrivateAggregateState(\n rng_key=None),\n optax.EmaState:\n lambda state, params_axes: optax.EmaState(\n count=None, ema=params_axes),\n optax.EmptyState:\n lambda state, params_axes: optax.EmptyState(),\n optax.TraceState:\n lambda state, params_axes: optax.TraceState(trace=params_axes),\n optax.ScaleByAdamState:\n lambda state, params_axes: optax.ScaleByAdamState(\n count=None, mu=params_axes, nu=params_axes),\n optax.ScaleByBeliefState:\n lambda state, params_axes: optax.ScaleByBeliefState(\n count=None, mu=params_axes, nu=params_axes),\n optax.ScaleByRssState:\n lambda state, params_axes: optax.ScaleByRssState(\n sum_of_squares=params_axes),\n optax.ScaleByRmsState:\n lambda state, params_axes: optax.ScaleByRmsState(nu=params_axes),\n optax.ScaleByRStdDevState:\n lambda state, params_axes: optax.ScaleByRStdDevState(\n mu=params_axes, nu=params_axes),\n optax.ScaleBySM3State:\n lambda state, params_axes: optax.ScaleBySM3State(\n mu=params_axes, nu=params_axes),\n optax.ScaleByTrustRatioState:\n lambda state, params_axes: optax.ScaleByTrustRatioState(),\n optax.ScaleByScheduleState:\n lambda state, params_axes: optax.ScaleByScheduleState(count=None),\n optax.ScaleByFromageState:\n lambda state, params_axes: optax.ScaleByFromageState(count=None),\n optax.ZeroNansState:\n lambda state, params_axes: optax.ZeroNansState(found_nan=None),\n # FactoredState\n\n # Recursive, Combinator Optax States:\n\n # MaskedState\n optax.MaskedState:\n lambda state, params_axes: optax.MaskedState(\n inner_state=OptaxStatePartitionRules.derive_optax_logical_axes(\n state.inner_state, params_axes)),\n optax.InjectHyperparamsState:\n lambda state, params_axes: optax.InjectHyperparamsState(\n count=None,\n hyperparams=jax.tree_map(lambda x: None, state.hyperparams),\n inner_state=OptaxStatePartitionRules.derive_optax_logical_axes(\n state.inner_state, params_axes)),\n optax.MultiStepsState:\n lambda state, params_axes: optax.MultiStepsState(\n mini_step=None,\n gradient_step=None,\n inner_opt_state=OptaxStatePartitionRules.\n derive_optax_logical_axes( # pylint: disable=line-too-long\n state.inner_opt_state, params_axes),\n acc_grads=params_axes),\n optax.ApplyIfFiniteState:\n lambda state, params_axes: optax.ApplyIfFiniteState(\n notfinite_count=None,\n last_finite=None,\n total_notfinite=None,\n inner_state=OptaxStatePartitionRules.derive_optax_logical_axes(\n state.inner_state, params_axes)),\n optax.MaybeUpdateState:\n lambda state, params_axes: optax.MaybeUpdateState(\n inner_state=OptaxStatePartitionRules.derive_optax_logical_axes(\n state.inner_state, params_axes),\n step=None),\n optax.MultiTransformState:\n lambda state, params_axes: optax.MultiTransformState(\n inner_states=OptaxStatePartitionRules.derive_optax_logical_axes(\n state.inner_states, params_axes)),\n # LookaheadState\n # SplitRealAndImaginaryState\n }" }, { "identifier": "partitioning", "path": "t5x/partitioning.py", "snippet": "class AxisNames(tuple):\nclass LocalChunkInfo:\nclass LocalChunker:\nclass DataLayout:\nclass 
BasePartitioner(metaclass=abc.ABCMeta):\nclass PjittedFnWithContext(PartitionedCallable):\nclass BasePjitPartitioner(BasePartitioner):\nclass PjitPartitioner(BasePjitPartitioner):\n def __new__(cls, *names):\n def __repr__(self):\ndef pjit(\n fun: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = (),\n backend: Optional[str] = None):\ndef pjit_with_cpu_fallback(\n fun: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = (),\n backend: Optional[str] = None):\ndef with_sharding_constraint(x, axis_resources):\ndef bounds_from_last_device(\n last_device: jax.lib.xla_client.Device) -> HardwareMesh:\ndef get_coords(device: jax.lib.xla_client.Device) -> HardwareMesh:\ndef global_mesh_defined():\ndef get_mesh(model_parallel_submesh: HardwareMesh,\n input_devices: Sequence[JaxDevice] = (),\n input_local_devices: Sequence[JaxDevice] = (),\n tile_by_host_if_needed: bool = True,\n backend: Optional[str] = None) -> Mesh:\n def dh_dd_mh_md(g: int, m: int, l: int) -> Tuple[int, int, int, int]:\ndef get_cpu_mesh() -> Mesh:\ndef get_gpu_mesh(num_partitions: int) -> Mesh:\ndef default_mesh(num_partitions: int,\n model_parallel_submesh: Optional[HardwareMesh] = None,\n backend: Optional[str] = None) -> Mesh:\n def __init__(self, global_mesh: Mesh):\n def get_local_chunk_info(\n self, global_shape: Tuple[int, ...],\n mesh_axes: Sequence[Optional[str]]) -> LocalChunkInfo:\ndef standard_logical_axis_rules(\n activation_partitioning_dims: int = 1,\n parameter_partitioning_dims: int = 1,\n additional_rules: Optional[LogicalAxisRules] = None) -> LogicalAxisRules:\ndef _id_fn(x, ix):\n def __init__(self,\n num_partitions: Optional[int] = None,\n model_parallel_submesh: Optional[HardwareMesh] = None,\n params_on_devices: bool = True,\n backend: Optional[str] = None):\n def mesh(self) -> Mesh:\n def data_partition_spec(self) -> PartitionSpec:\n def get_data_layout(self,\n batch_size: Optional[int] = None,\n host_index: Optional[int] = None) -> DataLayout:\n def get_local_chunk_info(\n self, global_shape: Tuple[int, ...],\n mesh_axes: Sequence[Optional[str]]) -> LocalChunkInfo:\n def params_on_devices(self):\n def move_params_to_devices(self, train_state: TrainState,\n train_state_axes: TrainState) -> TrainState:\n def _local_chunker(self):\n def get_logical_axes(self, train_state: TrainState) -> TrainState:\n def get_mesh_axes(self, train_state: TrainState) -> TrainState:\n def partition(\n self,\n fn: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = ()\n ) -> PartitionedCallable:\n def compile(self, partitioned_fn: PartitionedCallable,\n *args) -> CompiledPartitionedCallable:\n def __init__(self,\n pjitted_fn,\n partition_mesh: Mesh,\n logical_axis_rules: flax_partitioning.LogicalRules = ()):\n def __call__(self, *args):\n def lower(self, *args):\n def _local_chunker(self) -> LocalChunker:\n def mesh(self) -> Mesh:\n def partition(\n self,\n fn: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = ()\n ) -> PjittedFnWithContext:\n def compile(self, partitioned_fn: PjittedFnWithContext,\n *args) -> 
CompiledPartitionedCallable:\n def __init__(self,\n num_partitions: Optional[int] = None,\n model_parallel_submesh: Optional[HardwareMesh] = None,\n params_on_devices: bool = True,\n backend: Optional[str] = None,\n logical_axis_rules: Optional[LogicalAxisRules] = None,\n use_cpu_pjit: Optional[bool] = False):\n def partition(\n self,\n fn: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = ()\n ) -> PjittedFnWithContext:\n def logical_axis_rules(self):\n def get_logical_axes(self, train_state: TrainState) -> TrainState:\n def get_mesh_axes(self, train_state: TrainState) -> TrainState:\n def _logical_to_mesh_axes(param_name, logical_axes):" }, { "identifier": "test_utils", "path": "t5x/test_utils.py", "snippet": "class CpuDevice:\nclass GpuDevice:\nclass TpuDevice:\n class DummyVocab:\ndef coords_to_idx(coords: Tuple[int, ...], bounds: Tuple[int, ...]) -> int:\ndef make_devices(nx: int,\n ny: int,\n nz: int,\n nc: int = 2,\n host_layout: Tuple[int, ...] = (2, 2, 1, 2),\n kind='TPU v3'):\ndef get_t5_test_model(**config_overrides) -> models.EncoderDecoderModel:\ndef with_mesh(named_shape: MeshSpec) -> Generator[None, None, None]:\ndef create_global_mesh(mesh_shape, axis_names):\ndef get_fake_vocab():\ndef get_fake_tokenized_dataset(*_, split='validation', **__):\ndef assert_equal(a, b):\ndef assert_same(tree_a, tree_b):\ndef get_train_state_from_variables(variables,\n optimizer_def=adafactor.Adafactor(0.0)):\n_FAKE_TOKENIZED_DATASET = {\n 'train': [\n {\n 'inputs': (3, 13, 7, 14, 15, 9, 4, 16),\n 'inputs_pretokenized': 'complete: this',\n 'targets': (3, 8, 6, 3, 5, 10),\n 'targets_pretokenized': 'is a test'\n },\n {\n 'inputs': (3, 13, 7, 14, 15, 9, 4, 16),\n 'inputs_pretokenized': 'complete: that',\n 'targets': (17, 5, 6, 3, 5, 10),\n 'targets_pretokenized': 'was a test'\n },\n {\n 'inputs': (3, 13, 7, 14, 15, 9, 4, 16),\n 'inputs_pretokenized': 'complete: those',\n 'targets': (17, 4, 23, 4, 10, 6),\n 'targets_pretokenized': 'were tests'\n },\n ],\n # Notice that we repeat consecutively each examples 4 times,\n # this needed for tests like infer_tests to validate determinism.\n 'validation': [{\n 'inputs': (3, 13, 7, 14, 15, 9, 4, 16),\n 'inputs_pretokenized': 'complete: this',\n 'targets': (3, 8, 6, 3, 5, 3, 25, 5),\n 'targets_pretokenized': 'is a validation',\n }] * 4 + [{\n 'inputs': (3, 13, 7, 14, 15, 9, 4, 17),\n 'inputs_pretokenized': 'complete: that',\n 'targets': (17, 5, 6, 3, 5, 22, 7, 24),\n 'targets_pretokenized': 'was another validation',\n }] * 4\n}" }, { "identifier": "train_state", "path": "t5x/train_state.py", "snippet": "EMPTY_DICT = flax.core.freeze({})\nclass TrainState(typing_extensions.Protocol):\nclass FlaxOptimTrainState(flax.struct.PyTreeNode):\nclass GanOptimTrainState(FlaxOptimTrainState):\nclass InferenceState(flax.struct.PyTreeNode):\n def step(self) -> jnp.ndarray:\n def params(self) -> FrozenVariableDict:\n def param_states(self) -> FrozenVariableDict:\n def flax_mutables(self) -> FrozenVariableDict:\n def state_dict(self) -> MutableVariableDict:\n def restore_state(self, state_dict: Mapping[str, Any]) -> 'TrainState':\n def replace_params(self, params: VariableDict) -> 'TrainState':\n def replace_flax_mutables(self, flax_mutables: FrozenDict) -> 'TrainState':\n def replace_step(self, step: jnp.ndarray) -> 'TrainState':\n def apply_gradient(self,\n grads,\n learning_rate,\n flax_mutables=EMPTY_DICT) -> 'TrainState':\n def 
as_logical_axes(self) -> 'TrainState':\ndef _validate_params_axes(params_axes, params):\ndef _split_variables_and_axes(\n variables_and_axes: FrozenVariableDict\n) -> Tuple[FrozenVariableDict, FrozenVariableDict]:\n def create(cls, optimizer_def: optimizers.OptimizerDefType,\n model_variables: FrozenVariableDict) -> 'FlaxOptimTrainState':\n def step(self) -> jnp.ndarray:\n def params(self) -> FrozenVariableDict:\n def param_states(self) -> FrozenVariableDict:\n def state_dict(self) -> MutableVariableDict:\n def apply_gradient(self,\n grads,\n learning_rate,\n flax_mutables=EMPTY_DICT) -> 'FlaxOptimTrainState':\n def replace_params(self, params: VariableDict) -> 'FlaxOptimTrainState':\n def replace_flax_mutables(self,\n flax_mutables: FrozenDict) -> 'FlaxOptimTrainState':\n def replace_step(self, step: jnp.ndarray) -> 'FlaxOptimTrainState':\n def restore_state(self, state_dict: VariableDict) -> 'FlaxOptimTrainState':\n def as_logical_axes(self) -> 'FlaxOptimTrainState':\n def apply_gradient(self,\n grads,\n learning_rate,\n flax_mutables=EMPTY_DICT) -> 'FlaxOptimTrainState':\n def create(cls, model_variables: FrozenVariableDict) -> 'InferenceState':\n def param_states(self) -> FrozenVariableDict:\n def apply_gradient(self, *args, **kwargs) -> 'InferenceState':\n def state_dict(self) -> MutableMapping[str, Any]:\n def replace_step(self, step: jnp.ndarray) -> 'InferenceState':\n def replace_params(self, params: FrozenVariableDict) -> 'InferenceState':\n def replace_flax_mutables(self,\n flax_mutables: FrozenDict) -> 'InferenceState':\n def restore_state(self, state_dict: Mapping[str, Any]) -> 'InferenceState':\n def as_logical_axes(self) -> 'InferenceState':" }, { "identifier": "trainer", "path": "t5x/trainer.py", "snippet": "def _merge_metrics(a, b):\ndef merge_metrics(a, b):\n def result(self) -> Mapping[str, Array]:\n def result(self) -> Mapping[str, clu.values.Value]:\n def result(self) -> float:\n def __call__(\n self,\n step: jnp.ndarray,\n ) -> jnp.ndarray:\n def __call__(self, metrics: MetricMapType, duration: float,\n num_steps: int) -> Mapping[str, jnp.ndarray]:\n def __call__(\n self, train_state: train_state_lib.TrainState,\n batch: BatchType) -> Tuple[train_state_lib.TrainState, MetricMapType]:\n def __call__(self, train_state: train_state_lib.TrainState,\n batch: jnp.ndarray) -> MetricMapType:\n def compute_metrics(\n self, gradients: ModelWeights,\n old_train_state: train_state_lib.TrainState,\n new_train_state: train_state_lib.TrainState) -> MutableMetricMapType:\n def _make_rms_metrics(name, tree):\n def _make_max_metrics(name, tree):\n def compute_metrics(\n self, gradients: ModelWeights,\n old_train_state: train_state_lib.TrainState,\n new_train_state: train_state_lib.TrainState) -> MutableMetricMapType:\n def __init__(self):\n def close(self):\n def __del__(self):\n def _get_completion_future(self, block_on: PyTreeDef = ()) -> TimeFuture:\n def _get_completion_time():\n def start(self, block_on: PyTreeDef = ()):\n def stop(self, block_on: PyTreeDef = ()) -> TimeFuture:\n def __init__(self, name: str, summary_dir: Optional[str] = None, log_to_wandb=False):\n def __del__(self):\n def close(self):\n def summary_writer(self) -> metric_writers.MetricWriter:\n def write_scalar(self, key: str, val: metric_writers.interface.Scalar,\n step: int):\n def write_scalars(self, step: int,\n scalars: Mapping[str, metric_writers.interface.Scalar]):\n def start_duration_timer(self, block_on: PyTreeDef = ()):\n def write_metrics_summary(self, metrics: MetricMapType, step: int,\n num_steps: 
int) -> MetricValueMapFuture:\n def _summarize_and_write():\n def _ensure_not_on_device(x):\n def flush(self):\n def __init__(self, model: models.BaseModel,\n train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n eval_names: Sequence[str], summary_dir: Optional[str],\n train_state_axes: Any, rng: Rng,\n use_wandb=False, packing_strategy=None, log_weights=None):\n def __enter__(self):\n def __exit__(self, exc_type, exc_value, traceback):\n def close(self):\n def _get_step_rng(self, step: int) -> Rng:\n def train_state(self):\n def train_state(self, train_state: PyTreeDef):\n def _weight_metric_fn(self):\n def _get_weight_metrics_fn(_params):\n def train(self,\n batch_iter: Union[Iterator[BatchType],\n clu.data.dataset_iterator.DatasetIterator],\n num_steps: int,\n start_step: Optional[int] = None) -> ArrayMapFuture:\n def compile_train(self, batch: ElementSpec) -> None:\n def eval(\n self, batch_iters: Mapping[str,\n Iterator[BatchType]], pbar_nsteps=None) -> Mapping[str, Array]:\n def compile_eval(self, batches: Mapping[str, BatchType]) -> None:\n def _partitioned_train_step(self) -> PartitionedTrainCallable:\n def _partitioned_eval_step(self) -> PartitionedEvalCallable:\ndef accumulate_grads_microbatched(\n model: models.BaseModel,\n train_state: train_state_lib.TrainState,\n batch: BatchType,\n dropout_rng: Rng,\n num_microbatches: Optional[int],\n data_partition_spec: PartitionSpec = PartitionSpec(\"data\"),\n loss_fn_args=None\n) -> Tuple[train_state_lib.TrainState, MutableMetricMapType,\n def get_microbatch(batch: BatchType, idx: int) -> Mapping[str, jnp.ndarray]:\n def metrics_and_grad(loop_cnt, dropout_rng, flax_mutables=None):\n def per_microbatch_train_step(\n loop_cnt: int, state: Tuple[jnp.ndarray, jnp.ndarray,\n Mapping[str, jnp.ndarray],\n Optional[FlaxMutables]]\n ) -> Tuple[jnp.ndarray, jnp.ndarray, Mapping[str, jnp.ndarray],\ndef apply_grads(\n train_state: train_state_lib.TrainState,\n grad_accum: ModelWeights,\n metrics: MutableMetricMapType,\n learning_rate: jnp.ndarray,\n weight_metrics_computer: Optional[WeightMetricsComputer],\n other_state_variables: Optional[Mapping[str, Any]] = None\n) -> Tuple[train_state_lib.TrainState, MetricMapType]:\ndef eval_step(model: models.BaseModel, train_state: train_state_lib.TrainState,\n batch: jnp.ndarray) -> MetricMapType:\ndef train_with_lr(\n train_state: train_state_lib.TrainState,\n batch: BatchType,\n learning_rate: jnp.ndarray,\n dropout_rng: Rng,\n model: models.BaseModel,\n num_microbatches: Optional[int],\n weight_metrics_computer: Optional[WeightMetricsComputer] = None,\n data_partition_spec: PartitionSpec = PartitionSpec(\"data\"),\n loss_fn_args=None\n):\n def __call__(self, model: models.BaseModel,\n train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n eval_names: Sequence[str], summary_dir: Optional[str],\n train_state_axes: Any, rng: Rng) -> BaseTrainer:\n def __init__(self,\n model: models.BaseModel,\n train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n eval_names: Sequence[str],\n summary_dir: Optional[str],\n train_state_axes: Any,\n rng: Rng,\n learning_rate_fn: LearningRateCallable,\n num_microbatches: Optional[int],\n weight_metrics_computer: Optional[WeightMetricsComputer] = None,\n use_wandb=True,\n packing_strategy=None,\n log_weights=False\n ):\n def _partitioned_train_step(self) -> PartitionedTrainCallable:\n def train_step(train_state: train_state_lib.TrainState, batch: BatchType, static_args=None):\n 
def _partitioned_eval_step(self) -> PartitionedEvalCallable:\ndef _warn_action_not_run(action, task, metric):\n def run(self, train_state: train_state_lib.TrainState,\n metrics_by_task: Mapping[str, MetricValueMapType]) -> bool:\n def __init__(self,\n metric: Tuple[str, str],\n mode: str,\n patience: int = 3,\n atol: float = 0.,\n rtol: float = 0.):\n def _compare_fn(self, current, previous):\n def run(self, train_state: train_state_lib.TrainState,\n metrics_by_task: Mapping[str, MetricValueMapType]) -> bool:\n def __init__(self, task: str, metric: str = \"loss\"):\n def run(self, train_state: train_state_lib.TrainState,\n metrics_by_task: Mapping[str, MetricValueMapType]) -> bool:\nclass ArrayMapFuture(typing_extensions.Protocol):\nclass MetricValueMapFuture(typing_extensions.Protocol):\nclass TimeFuture(typing_extensions.Protocol):\nclass LearningRateCallable(typing_extensions.Protocol):\nclass SummarizeMetricsCallable(typing_extensions.Protocol):\nclass PartitionedTrainCallable(typing_extensions.Protocol):\nclass PartitionedEvalCallable(typing_extensions.Protocol):\nclass GradNormComputer(object):\nclass WeightMetricsComputer(object):\nclass _AsyncTimer(object):\nclass MetricsManager(object):\nclass PreemptionError(Exception):\nclass BaseTrainer(abc.ABC):\nclass BaseTrainerConstructor(Protocol):\nclass Trainer(BaseTrainer):\nclass ActionMode(enum.Enum):\nclass BaseAction(abc.ABC):\nclass EarlyStoppingAction(BaseAction):\nclass TerminateOnNanAction(BaseAction):\n _WEIGHT_METRICS = [\n \"weight_rms\", \"weight_gradient_rms\", \"weight_update_rms\", \"weight_max\"\n ]\n TRAIN = 1\n TRAIN_EVAL = 2\n INFER_EVAL = 3" } ]
import collections import contextlib import os import chex import clu.metrics import clu.values import flax import jax import jax.numpy as jnp import numpy as np import tensorflow as tf from absl.testing import absltest from absl.testing import parameterized from clu import metric_writers from jax._src import dispatch as jax_dispatch from t5x import metrics as metrics_lib from t5x import models as models_lib from t5x import optimizers from t5x import partitioning from t5x import test_utils from t5x import train_state as train_state_lib from t5x import trainer as trainer_lib from tensorflow.io import gfile
10,061
"""Tests for t5x.trainer_lib.""" mock = absltest.mock jax.config.parse_flags_with_absl() FlaxMutables = flax.core.FrozenDict # Make `log_elapsed_time` a no-op to simplify mocking of `time.time()`. @contextlib.contextmanager def fake_log_elapsed_time(_): yield jax_dispatch.log_elapsed_time = fake_log_elapsed_time def _validate_events(test_case, summary_dir, expected_metrics, steps): summaries = gfile.listdir(summary_dir) test_case.assertLen(summaries, 1) summary_path = os.path.join(summary_dir, summaries[0]) event_file = os.path.join(summary_path) events = list(tf.compat.v1.train.summary_iterator(event_file)) actual_events = {} # First event is boilerplate test_case.assertLen(events, len(steps) + 1) for step, event in zip(steps, events[1:]): test_case.assertEqual(event.step, step) test_case.assertLen(event.summary.value, 1) tensor = event.summary.value[0].tensor if tensor.string_val: actual_events[event.summary.value[0].tag] = tensor.string_val[0].decode() else: actual_events[event.summary.value[0].tag] = float(tf.make_ndarray(tensor)) <<<<<<< HEAD jax.tree_map(test_case.assertAlmostEqual, actual_events, expected_metrics) ======= jax.tree_map(test_case.assertAlmostEqual, actual_events, expected_metrics) >>>>>>> 1f8cec78b1f28f1955d70741792d7b6e7dd76226 class MetricsManagerTest(absltest.TestCase): def setUp(self): super().setUp() self.model_dir = self.create_tempdir().full_path def test_summary_dir(self): # All hosts have the summary dir. with mock.patch('jax.process_index', return_value=0): mm = trainer_lib.MetricsManager('eval', self.model_dir) self.assertEqual(mm.summary_dir, os.path.join(self.model_dir, 'eval')) mm.close() with mock.patch('jax.process_index', return_value=1): mm = trainer_lib.MetricsManager('eval', self.model_dir) self.assertEqual(mm.summary_dir, os.path.join(self.model_dir, 'eval')) mm.close() def test_summary_writer(self): # Only host 0 creates a non-empty summary writer. with mock.patch('jax.process_index', return_value=1): mm = trainer_lib.MetricsManager('eval', self.model_dir) self.assertFalse(gfile.exists(mm.summary_dir)) mm.close() with mock.patch('jax.process_index', return_value=0): mm = trainer_lib.MetricsManager('eval', self.model_dir) self.assertIsInstance(mm.summary_writer, metric_writers.MetricWriter) self.assertTrue(gfile.exists(mm.summary_dir)) mm.close() def test_write_scalar(self): gfile.makedirs(os.path.join(self.model_dir, 'eval')) # tag, value, step scalars = [('loss', 1.0, 1), ('accuracy', 100.0, 2)] # Only host 0 has actually writes summaries. 
with mock.patch('jax.process_index', return_value=1): mm = trainer_lib.MetricsManager('eval', self.model_dir) for s in scalars: mm.write_scalar(*s) self.assertEmpty(gfile.listdir(mm.summary_dir)) mm.close() with mock.patch('jax.process_index', return_value=0): mm = trainer_lib.MetricsManager('eval', self.model_dir) for s in scalars: mm.write_scalar(*s) mm.flush() summaries = gfile.listdir(mm.summary_dir) self.assertLen(summaries, 1) event_file = os.path.join(mm.summary_dir, summaries[0]) events = list(tf.compat.v1.train.summary_iterator(event_file)) # First event is boilerplate self.assertLen(events, 3) for event, (tag, value, step) in zip(events[1:], scalars): self.assertEqual(event.step, step) self.assertLen(event.summary.value, 1) self.assertEqual(event.summary.value[0].tag, tag) self.assertEqual(tf.make_ndarray(event.summary.value[0].tensor), value) mm.close() def test_write_metrics_summary(self): gfile.makedirs(os.path.join(self.model_dir, 'eval')) @flax.struct.dataclass class MockTextMetric(clu.metrics.Metric): def compute_value(self): return clu.values.Text('test metric') accumulated_metrics = {
# Copyright 2022 The T5X Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for t5x.trainer_lib.""" mock = absltest.mock jax.config.parse_flags_with_absl() FlaxMutables = flax.core.FrozenDict # Make `log_elapsed_time` a no-op to simplify mocking of `time.time()`. @contextlib.contextmanager def fake_log_elapsed_time(_): yield jax_dispatch.log_elapsed_time = fake_log_elapsed_time def _validate_events(test_case, summary_dir, expected_metrics, steps): summaries = gfile.listdir(summary_dir) test_case.assertLen(summaries, 1) summary_path = os.path.join(summary_dir, summaries[0]) event_file = os.path.join(summary_path) events = list(tf.compat.v1.train.summary_iterator(event_file)) actual_events = {} # First event is boilerplate test_case.assertLen(events, len(steps) + 1) for step, event in zip(steps, events[1:]): test_case.assertEqual(event.step, step) test_case.assertLen(event.summary.value, 1) tensor = event.summary.value[0].tensor if tensor.string_val: actual_events[event.summary.value[0].tag] = tensor.string_val[0].decode() else: actual_events[event.summary.value[0].tag] = float(tf.make_ndarray(tensor)) jax.tree_map(test_case.assertAlmostEqual, actual_events, expected_metrics) class MetricsManagerTest(absltest.TestCase): def setUp(self): super().setUp() self.model_dir = self.create_tempdir().full_path def test_summary_dir(self): # All hosts have the summary dir. with mock.patch('jax.process_index', return_value=0): mm = trainer_lib.MetricsManager('eval', self.model_dir) self.assertEqual(mm.summary_dir, os.path.join(self.model_dir, 'eval')) mm.close() with mock.patch('jax.process_index', return_value=1): mm = trainer_lib.MetricsManager('eval', self.model_dir) self.assertEqual(mm.summary_dir, os.path.join(self.model_dir, 'eval')) mm.close() def test_summary_writer(self): # Only host 0 creates a non-empty summary writer. with mock.patch('jax.process_index', return_value=1): mm = trainer_lib.MetricsManager('eval', self.model_dir) self.assertFalse(gfile.exists(mm.summary_dir)) mm.close() with mock.patch('jax.process_index', return_value=0): mm = trainer_lib.MetricsManager('eval', self.model_dir) self.assertIsInstance(mm.summary_writer, metric_writers.MetricWriter) self.assertTrue(gfile.exists(mm.summary_dir)) mm.close() def test_write_scalar(self): gfile.makedirs(os.path.join(self.model_dir, 'eval')) # tag, value, step scalars = [('loss', 1.0, 1), ('accuracy', 100.0, 2)] # Only host 0 actually writes summaries. 
with mock.patch('jax.process_index', return_value=1): mm = trainer_lib.MetricsManager('eval', self.model_dir) for s in scalars: mm.write_scalar(*s) self.assertEmpty(gfile.listdir(mm.summary_dir)) mm.close() with mock.patch('jax.process_index', return_value=0): mm = trainer_lib.MetricsManager('eval', self.model_dir) for s in scalars: mm.write_scalar(*s) mm.flush() summaries = gfile.listdir(mm.summary_dir) self.assertLen(summaries, 1) event_file = os.path.join(mm.summary_dir, summaries[0]) events = list(tf.compat.v1.train.summary_iterator(event_file)) # First event is boilerplate self.assertLen(events, 3) for event, (tag, value, step) in zip(events[1:], scalars): self.assertEqual(event.step, step) self.assertLen(event.summary.value, 1) self.assertEqual(event.summary.value[0].tag, tag) self.assertEqual(tf.make_ndarray(event.summary.value[0].tensor), value) mm.close() def test_write_metrics_summary(self): gfile.makedirs(os.path.join(self.model_dir, 'eval')) @flax.struct.dataclass class MockTextMetric(clu.metrics.Metric): def compute_value(self): return clu.values.Text('test metric') accumulated_metrics = {
'loss': metrics_lib.Sum(40.0),
9
2023-12-12 20:23:33+00:00
12k
SafeAILab/EAGLE
train/main.py
[ { "identifier": "Model", "path": "model/cnets.py", "snippet": "class Model(nn.Module):\r\n def __init__(self,config,load_emb=False,path=None):\r\n super().__init__()\r\n\r\n\r\n\r\n\r\n self.gradient_checkpointing = True\r\n self.padding_idx = config.pad_token_id\r\n self.vocab_size = config.vocab_size\r\n\r\n self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)\r\n if load_emb:\r\n from safetensors import safe_open\r\n import json\r\n try:\r\n with open(os.path.join(path,\"model.safetensors.index.json\"),\"r\") as f:\r\n index_json=json.loads(f.read())\r\n emb_path=index_json[\"weight_map\"][\"model.embed_tokens.weight\"]\r\n with safe_open(os.path.join(path,emb_path),\r\n framework=\"pt\",\r\n device=\"cpu\") as f:\r\n tensor_slice = f.get_slice(\"model.embed_tokens.weight\")\r\n vocab_size, hidden_dim = tensor_slice.get_shape()\r\n tensor = tensor_slice[:, :hidden_dim].float()\r\n except:\r\n with open(os.path.join(path, \"pytorch_model.bin.index.json\"), \"r\") as f:\r\n index_json = json.loads(f.read())\r\n emb_path = index_json[\"weight_map\"][\"model.embed_tokens.weight\"]\r\n weights=torch.load(os.path.join(path,emb_path))\r\n tensor=weights[\"model.embed_tokens.weight\"].float()\r\n self.embed_tokens.weight.data = tensor\r\n\r\n\r\n #self.init_tree()\r\n\r\n self.layers = nn.ModuleList([LlamaDecoderLayer(config,index) for index in range(config.num_hidden_layers)])\r\n self.fc=nn.Linear(2*config.hidden_size,config.hidden_size)\r\n self.act=ACT2FN[config.hidden_act]\r\n for param in self.embed_tokens.parameters():\r\n param.requires_grad = False\r\n\r\n\r\n def init_tree(self):\r\n self.tree = mc_sim_7b_63\r\n self.tree_buffer=generate_tree_buffers(self.tree,self.embed_tokens.weight.device)\r\n\r\n\r\n def reset(self):\r\n self.tree_mask=None\r\n\r\n\r\n def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):\r\n # create causal mask\r\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\r\n combined_attention_mask = None\r\n if input_shape[-1] > 1:\r\n combined_attention_mask = _make_causal_mask(\r\n input_shape,\r\n #inputs_embeds.dtype,\r\n torch.float32, # [MODIFIED] force to cast to float32\r\n device=inputs_embeds.device,\r\n past_key_values_length=past_key_values_length,\r\n )\r\n\r\n if attention_mask is not None:\r\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\r\n expanded_attn_mask = _expand_mask(attention_mask, torch.float32, tgt_len=input_shape[-1]).to(\r\n inputs_embeds.device\r\n )\r\n combined_attention_mask = (\r\n expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask\r\n )\r\n\r\n # [MODIFIED] add tree mask\r\n if hasattr(self, \"tree_mask\") and self.tree_mask is not None:\r\n tree_mask = self.tree_mask\r\n tree_len = tree_mask.size(-1)\r\n combined_attention_mask[:, :, -tree_len:, -tree_len:][\r\n tree_mask == 0\r\n ] = torch.finfo(torch.float32).min\r\n\r\n\r\n return combined_attention_mask\r\n\r\n def forward(\r\n self,\r\n hidden_states,\r\n input_ids,\r\n attention_mask: Optional[torch.Tensor] = None,\r\n position_ids: Optional[torch.LongTensor] = None,\r\n past_key_values: Optional[List[torch.FloatTensor]] = None,\r\n inputs_embeds: Optional[torch.FloatTensor] = None,\r\n use_cache: Optional[bool] = None,\r\n output_attentions: Optional[bool] = None,\r\n output_hidden_states: Optional[bool] = None,\r\n return_dict: Optional[bool] = None,\r\n std=None\r\n ):\r\n batch_size, seq_length, _ = 
hidden_states.shape\r\n seq_length_with_past = seq_length\r\n past_key_values_length = 0\r\n\r\n with torch.no_grad():\r\n inputs_embeds = self.embed_tokens(input_ids)\r\n #inputs_embeds = inputs_embeds.detach()\r\n\r\n # if std is not None:\r\n # noise = torch.randn(inputs_embeds.size(),device=inputs_embeds.device) * std\r\n # inputs_embeds=inputs_embeds+noise\r\n\r\n if past_key_values is not None:\r\n past_key_values_length = past_key_values[0][0].shape[2]\r\n seq_length_with_past = seq_length_with_past + past_key_values_length\r\n if position_ids is None:\r\n device = hidden_states.device if hidden_states is not None else inputs_embeds.device\r\n position_ids = torch.arange(\r\n past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device\r\n )\r\n position_ids = position_ids.unsqueeze(0).view(-1, seq_length)\r\n else:\r\n position_ids = position_ids.view(-1, seq_length).long()\r\n\r\n if attention_mask is None:\r\n attention_mask = torch.ones(\r\n (batch_size, seq_length_with_past), dtype=torch.bool, device=hidden_states.device\r\n )\r\n attention_mask = self._prepare_decoder_attention_mask(\r\n attention_mask, (batch_size, seq_length), hidden_states, past_key_values_length\r\n )\r\n\r\n # if self.gradient_checkpointing and self.training:\r\n # if use_cache:\r\n # use_cache = False\r\n\r\n\r\n #hidden_states=self.act(self.fc(torch.cat((inputs_embeds,hidden_states),dim=-1)))\r\n inputs_embeds=inputs_embeds.to(hidden_states.dtype)\r\n hidden_states = self.fc(torch.cat((inputs_embeds, hidden_states), dim=-1))\r\n\r\n\r\n all_hidden_states = () if output_hidden_states else None\r\n next_decoder_cache = () if use_cache else None\r\n\r\n for idx, decoder_layer in enumerate(self.layers):\r\n if output_hidden_states:\r\n all_hidden_states += (hidden_states,)\r\n\r\n past_key_value = past_key_values[idx] if past_key_values is not None else None\r\n\r\n if self.gradient_checkpointing and self.training:\r\n\r\n def create_custom_forward(module):\r\n def custom_forward(*inputs):\r\n # None for past_key_value\r\n return module(*inputs, past_key_value, output_attentions)\r\n\r\n return custom_forward\r\n\r\n layer_outputs = torch.utils.checkpoint.checkpoint(\r\n create_custom_forward(decoder_layer),\r\n hidden_states,\r\n attention_mask,\r\n position_ids,\r\n )\r\n else:\r\n layer_outputs = decoder_layer(\r\n hidden_states,\r\n attention_mask=attention_mask,\r\n position_ids=position_ids,\r\n past_key_value=past_key_value,\r\n output_attentions=output_attentions,\r\n use_cache=use_cache,\r\n )\r\n\r\n hidden_states = layer_outputs[0]\r\n\r\n if use_cache:\r\n next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)\r\n\r\n if use_cache:\r\n return hidden_states,next_decoder_cache\r\n\r\n return hidden_states\r\n\r\n @torch.no_grad()\r\n def generate(self,hidden_states,input_ids,head,max_length=4,use_cache=False):\r\n return_input_ids=copy.deepcopy(input_ids[0].tolist())\r\n input_ids=input_ids[:,1:]\r\n\r\n #input_ids=input_ids.to(hidden_states.device)\r\n if use_cache:\r\n past_key_values=None\r\n for i in range(max_length):\r\n if past_key_values!=None:\r\n out_hidden,past_key_values = self(out_hidden[:, -1:], input_ids=torch.tensor([[token]]).to(input_ids.device),past_key_values=past_key_values,use_cache=True)\r\n else:\r\n out_hidden, past_key_values = self(hidden_states, input_ids=input_ids,use_cache=True)\r\n last_hidden = out_hidden[:, -1]\r\n last_headout = head(last_hidden)\r\n token = torch.argmax(last_headout)\r\n #input_ids = 
torch.cat((input_ids, torch.tensor([[token]]).to(input_ids.device)), dim=1)\r\n return_input_ids.append(token.item())\r\n if token == 2:\r\n break\r\n #hidden_states = torch.cat((hidden_states, out_hidden[:, -1:]), dim=1)\r\n else:\r\n for i in range(max_length):\r\n out_hidden=self(hidden_states,input_ids=input_ids)\r\n last_hidden = out_hidden[:, -1]\r\n last_headout = head(last_hidden)\r\n token = torch.argmax(last_headout)\r\n return_input_ids.append(token.item())\r\n input_ids = torch.cat((input_ids, torch.tensor([[token]]).to(input_ids.device)), dim=1)\r\n if token==2:\r\n break\r\n hidden_states = torch.cat((hidden_states, out_hidden[:, -1:]), dim=1)\r\n\r\n return return_input_ids\r\n\r\n @torch.no_grad()\r\n def repeat_kv(self,kv,numr):\r\n newkv=[]\r\n for i in kv:\r\n newkv.append((i[0].repeat(numr,1,1,1),i[1].repeat(numr,1,1,1)))\r\n return tuple(newkv)\r\n\r\n @torch.no_grad()\r\n def reduce_kv(self,kv,numr):\r\n newkv=[]\r\n for i in kv:\r\n newkv.append((i[0][:numr],i[1][:numr]))\r\n return tuple(newkv)\r\n\r\n\r\n def reset_kv(self):\r\n self.stable_kv=None\r\n\r\n @torch.no_grad()\r\n def repeat_hidden(self,hidden_state,repeat_num):\r\n new_hidden=[]\r\n for id,i in enumerate(repeat_num):\r\n new_hidden.append(hidden_state[:,id:id+1].repeat(1,i,1))\r\n return torch.cat(new_hidden,dim=1)\r\n\r\n # @torch.no_grad()\r\n # def sample(self,tensor,k=1,replacement=True):\r\n # probabilities = torch.nn.functional.softmax(tensor, dim=1)\r\n # sampled_indices = torch.multinomial(probabilities, k,replacement=replacement)\r\n # sampled_probs = torch.gather(probabilities, 1, sampled_indices)\r\n #\r\n # return sampled_indices,sampled_probs\r\n\r\n def sample(self,logits, logits_processor,k=1, replacement=False):\r\n logits = logits_processor(None, logits)\r\n probabilities = torch.nn.functional.softmax(logits, dim=1)\r\n sampled_indices = torch.multinomial(probabilities, k, replacement=False)\r\n sampled_probs = torch.gather(probabilities, 1, sampled_indices)\r\n\r\n cumulative_sum = torch.cumsum(sampled_probs, dim=1)\r\n cumulative_sum = torch.cat(\r\n (torch.zeros(cumulative_sum.shape[0], 1, device=cumulative_sum.device), cumulative_sum[:, :-1]), dim=-1)\r\n\r\n sampled_probs = sampled_probs / (1 - cumulative_sum)\r\n sampled_probs[torch.isinf(sampled_probs)] = -1\r\n sampled_probs[torch.isnan(sampled_probs)] = -1\r\n\r\n sampled_probs = torch.clamp(sampled_probs, min=0.0, max=1.0)\r\n\r\n return sampled_indices, sampled_probs,probabilities\r\n\r\n # if replacement:\r\n # sampled_indices = torch.multinomial(probabilities, k, replacement=True)\r\n # sampled_probs = torch.gather(probabilities, 1, sampled_indices)\r\n # return sampled_indices, sampled_probs\r\n # else:\r\n # sampled_indices = torch.multinomial(probabilities, k, replacement=False)\r\n # sampled_probs = torch.gather(probabilities, 1, sampled_indices)\r\n #\r\n # cumulative_sum = torch.cumsum(sampled_probs, dim=1)\r\n # cumulative_sum = torch.cat((torch.zeros(cumulative_sum.shape[0],1, device=cumulative_sum.device), cumulative_sum[:, :-1]),dim=-1)\r\n #\r\n # sampled_probs=sampled_probs/(1-cumulative_sum)\r\n # sampled_probs[torch.isinf(sampled_probs)] = -1\r\n # sampled_probs[torch.isnan(sampled_probs)] = -1\r\n #\r\n # sampled_probs = torch.clamp(sampled_probs, min=0.0, max=1.0)\r\n #\r\n # # has_nan = torch.isnan(sampled_probs).any()\r\n # # if has_nan:\r\n # # print(1)\r\n #\r\n # # sampled_probs_list=sampled_probs[0].tolist()\r\n # # sum_list=[1-sum(sampled_probs_list[:i]) for i in range(len(sampled_probs_list))]\r\n 
# # for i in range(len(sampled_probs_list)):\r\n # # a=sampled_probs_list[i]/(sum_list[i])\r\n # # if sum_list[i]==0:\r\n # # sampled_probs_list[i]=1.0\r\n # # else:\r\n # # sampled_probs_list[i]=sampled_probs_list[i]/(sum_list[i])\r\n # # sampled_probs=torch.tensor([sampled_probs_list],device=sampled_probs.device)\r\n #\r\n #\r\n #\r\n # return sampled_indices, sampled_probs\r\n\r\n @torch.no_grad()\r\n def topK_genrate(self, hidden_states, input_ids, head, logits_processor,max_length=4, use_cache=True):\r\n # test_=input_ids\r\n # input_ids = torch.tensor([state[1:]])\r\n input_ids = input_ids[:, 1:]\r\n input_ids = input_ids.to(hidden_states.device)\r\n ss_token,ss_prob,ss_op = [],[],[]\r\n len_posi=input_ids.shape[1]\r\n self.reset()\r\n if use_cache:\r\n\r\n\r\n if hasattr(self, \"stable_kv\") and self.stable_kv is not None:\r\n kv_len=self.stable_kv[0][0].shape[2]\r\n out_hidden, past_key_values = self(hidden_states, input_ids=input_ids[:,kv_len:], past_key_values=self.stable_kv,use_cache=True)\r\n else:\r\n out_hidden, past_key_values = self(hidden_states, input_ids=input_ids, use_cache=True)\r\n self.stable_kv=past_key_values\r\n last_hidden = out_hidden[:, -1]\r\n if not self.diff_device:\r\n last_headout = head(last_hidden)\r\n else:\r\n if hasattr(self, \"layer_device\"):\r\n last_headout = head(last_hidden)\r\n last_headout=last_headout.to(self.layer_device)\r\n else:\r\n last_headout=F.linear(last_hidden,self.headweight)\r\n\r\n\r\n\r\n for i in range(len(self.tree_buffer['tree_indices'])):\r\n if logits_processor is not None:\r\n topk_index,topk_prob,op=self.sample(last_headout,logits_processor,k=top_k,)\r\n else:\r\n top=torch.topk(last_headout, top_k, dim=-1)\r\n topk_index,topk_prob = top.indices,top.values\r\n op=None\r\n\r\n ss_token.append(topk_index)\r\n ss_prob.append(topk_prob)\r\n ss_op.append(op)\r\n #topk_index = torch.topk(last_headout, top_k, dim=-1).indices\r\n topk_index = topk_index.view(-1)\r\n select_index=topk_index[self.tree_buffer['tree_indices'][i]]\r\n #len_sq=select_index.shape[0]\r\n input_ids=select_index[None,:]\r\n if i==0:\r\n hidden_states = out_hidden[:, -1:]\r\n else:\r\n hidden_states=out_hidden\r\n hidden_states=self.repeat_hidden(hidden_states,self.tree_buffer[\"repeat_nums\"][i])\r\n #hidden_states = hidden_states.repeat(1,len_sq,1)\r\n self.tree_mask=self.tree_buffer['attn_mask'][i]\r\n position_ids=len_posi+self.tree_buffer[\"position_ids\"][i]\r\n out_hidden, past_key_values = self(hidden_states, input_ids=input_ids, past_key_values=past_key_values,\r\n position_ids=position_ids,use_cache=True)\r\n len_posi += 1\r\n\r\n if not self.diff_device:\r\n last_headout = head(out_hidden[0])\r\n else:\r\n if hasattr(self, \"layer_device\"):\r\n last_headout = head(out_hidden[0])\r\n last_headout = last_headout.to(self.layer_device)\r\n else:\r\n last_headout = F.linear(out_hidden[0], self.headweight)\r\n #last_headout = head(out_hidden[0])\r\n #sslogits.append(last_headout)\r\n #print(select_index)\r\n\r\n if logits_processor is not None:\r\n topk_index,topk_prob,op=self.sample(last_headout,logits_processor,k=top_k,)\r\n else:\r\n top = torch.topk(last_headout, top_k, dim=-1)\r\n topk_index, topk_prob = top.indices, top.values\r\n op=None\r\n ss_token.append(topk_index)\r\n ss_prob.append(topk_prob)\r\n ss_op.append(op)\r\n\r\n else:\r\n # TODO\r\n pass\r\n\r\n return (torch.cat(ss_token),torch.cat(ss_prob),ss_op)\r\n\r\n\r\n\r\n\r\n @torch.no_grad()\r\n def acc(self,data,head,max_length=5):\r\n hidden_states=data[\"hidden_states\"]\r\n 
input_ids=data[\"input_ids\"]\r\n #attention_mask=data[\"attention_mask\"]\r\n loss_mask=data[\"loss_mask\"]\r\n sample_mask=data[\"sample_mask\"]\r\n target=data[\"target\"]\r\n total=[0 for _ in range(max_length)]\r\n correct=[0 for _ in range(max_length)]\r\n bs,sl=hidden_states.shape[0],hidden_states.shape[1]\r\n target_headout = head(target)\r\n hidden_states_headout=head(hidden_states)\r\n\r\n for i in range(bs):\r\n for j in range(sl):\r\n if loss_mask[i,j]==0:\r\n continue\r\n single_hidden_states=hidden_states[i,:j]\r\n single_input_ids=input_ids[i,:j]\r\n\r\n\r\n single_hidden_states = single_hidden_states[None, :, :]\r\n single_input_ids = single_input_ids[None, :]\r\n for k in range(max_length):\r\n tmp_in_target_headout = hidden_states_headout[i,single_hidden_states.shape[1]-1]\r\n tmp_out_target_headout = target_headout[i, single_hidden_states.shape[1]-1]\r\n target_in_token = torch.argmax(tmp_in_target_headout)\r\n target_out_token = torch.argmax(tmp_out_target_headout)\r\n tmp_token=input_ids[i,single_hidden_states.shape[1]-1]\r\n tmp_sample_mask=sample_mask[i,single_hidden_states.shape[1]-1]\r\n if not (target_in_token==tmp_token):\r\n break\r\n out_hidden = self(single_hidden_states, input_ids=single_input_ids)\r\n last_hidden = out_hidden[:, -1]\r\n last_headout = head(last_hidden)\r\n token = torch.argmax(last_headout)\r\n total[k] += 1\r\n if token==target_out_token:\r\n correct[k]+=1\r\n else:\r\n for kk in range(k,max_length):\r\n total[kk]+=1\r\n break\r\n\r\n single_hidden_states=torch.cat((single_hidden_states,out_hidden[:,-1:]),dim=1)\r\n single_input_ids = torch.cat((single_input_ids, torch.tensor([[token]]).to(single_input_ids.device)), dim=1)\r\n\r\n\r\n acc=[correct[i]/total[i] for i in range(len(correct))]\r\n return acc\r" }, { "identifier": "EConfig", "path": "model/configs.py", "snippet": "class EConfig(PretrainedConfig):\r\n r\"\"\"\r\n This is the configuration class to store the configuration of a [`LlamaModel`]. It is used to instantiate an LLaMA\r\n model according to the specified arguments, defining the model architecture. Instantiating a configuration with the\r\n defaults will yield a similar configuration to that of the LLaMA-7B.\r\n\r\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\r\n documentation from [`PretrainedConfig`] for more information.\r\n\r\n\r\n Args:\r\n vocab_size (`int`, *optional*, defaults to 32000):\r\n Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the\r\n `inputs_ids` passed when calling [`LlamaModel`]\r\n hidden_size (`int`, *optional*, defaults to 4096):\r\n Dimension of the hidden representations.\r\n intermediate_size (`int`, *optional*, defaults to 11008):\r\n Dimension of the MLP representations.\r\n num_hidden_layers (`int`, *optional*, defaults to 32):\r\n Number of hidden layers in the Transformer encoder.\r\n num_attention_heads (`int`, *optional*, defaults to 32):\r\n Number of attention heads for each attention layer in the Transformer encoder.\r\n num_key_value_heads (`int`, *optional*):\r\n This is the number of key_value heads that should be used to implement Grouped Query Attention. If\r\n `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if\r\n `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. 
When\r\n converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed\r\n by meanpooling all the original heads within that group. For more details checkout [this\r\n paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to\r\n `num_attention_heads`.\r\n pretraining_tp (`int`, *optional*, defaults to `1`):\r\n Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this\r\n document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is\r\n necessary to ensure exact reproducibility of the pretraining results. Please refer to [this\r\n issue](https://github.com/pytorch/pytorch/issues/76232).\r\n hidden_act (`str` or `function`, *optional*, defaults to `\"silu\"`):\r\n The non-linear activation function (function or string) in the decoder.\r\n max_position_embeddings (`int`, *optional*, defaults to 2048):\r\n The maximum sequence length that this model might ever be used with. Typically set this to something large\r\n just in case (e.g., 512 or 1024 or 2048).\r\n initializer_range (`float`, *optional*, defaults to 0.02):\r\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\r\n rms_norm_eps (`float`, *optional*, defaults to 1e-12):\r\n The epsilon used by the rms normalization layers.\r\n use_cache (`bool`, *optional*, defaults to `True`):\r\n Whether or not the model should return the last key/values attentions (not used by all models). Only\r\n relevant if `config.is_decoder=True`.\r\n tie_word_embeddings(`bool`, *optional*, defaults to `False`):\r\n Whether to tie weight embeddings\r\n rope_scaling (`Dict`, *optional*):\r\n Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling\r\n strategies: linear and dynamic. Their scaling factor must be an float greater than 1. The expected format\r\n is `{\"type\": strategy name, \"factor\": scaling factor}`. When using this flag, don't update\r\n `max_position_embeddings` to the expected new maximum. See the following thread for more information on how\r\n these scaling strategies behave:\r\n https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. 
This is an\r\n experimental feature, subject to breaking API changes in future versions.\r\n\r\n Example:\r\n\r\n ```python\r\n >>> from transformers import LlamaModel, LlamaConfig\r\n\r\n >>> # Initializing a LLaMA llama-7b style configuration\r\n >>> configuration = LlamaConfig()\r\n\r\n >>> # Initializing a model from the llama-7b style configuration\r\n >>> model = LlamaModel(configuration)\r\n\r\n >>> # Accessing the model configuration\r\n >>> configuration = model.config\r\n ```\"\"\"\r\n model_type = \"llama\"\r\n keys_to_ignore_at_inference = [\"past_key_values\"]\r\n\r\n def __init__(\r\n self,\r\n vocab_size=32000,\r\n hidden_size=4096,\r\n intermediate_size=11008,\r\n num_hidden_layers=32,\r\n num_attention_heads=32,\r\n num_key_value_heads=None,\r\n hidden_act=\"silu\",\r\n max_position_embeddings=2048,\r\n initializer_range=0.02,\r\n rms_norm_eps=1e-6,\r\n use_cache=True,\r\n pad_token_id=None,\r\n bos_token_id=1,\r\n eos_token_id=2,\r\n pretraining_tp=1,\r\n tie_word_embeddings=False,\r\n rope_scaling=None,\r\n **kwargs,\r\n ):\r\n self.vocab_size = vocab_size\r\n self.max_position_embeddings = max_position_embeddings\r\n self.hidden_size = hidden_size\r\n self.intermediate_size = intermediate_size\r\n self.num_hidden_layers = num_hidden_layers\r\n self.num_attention_heads = num_attention_heads\r\n\r\n # for backward compatibility\r\n if num_key_value_heads is None:\r\n num_key_value_heads = num_attention_heads\r\n\r\n self.num_key_value_heads = num_key_value_heads\r\n self.hidden_act = hidden_act\r\n self.initializer_range = initializer_range\r\n self.rms_norm_eps = rms_norm_eps\r\n self.pretraining_tp = pretraining_tp\r\n self.use_cache = use_cache\r\n self.rope_scaling = rope_scaling\r\n self._rope_scaling_validation()\r\n\r\n super().__init__(\r\n pad_token_id=pad_token_id,\r\n bos_token_id=bos_token_id,\r\n eos_token_id=eos_token_id,\r\n tie_word_embeddings=tie_word_embeddings,\r\n **kwargs,\r\n )\r\n\r\n def _rope_scaling_validation(self):\r\n \"\"\"\r\n Validate the `rope_scaling` configuration.\r\n \"\"\"\r\n if self.rope_scaling is None:\r\n return\r\n\r\n if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:\r\n raise ValueError(\r\n \"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, \"\r\n f\"got {self.rope_scaling}\"\r\n )\r\n rope_scaling_type = self.rope_scaling.get(\"type\", None)\r\n rope_scaling_factor = self.rope_scaling.get(\"factor\", None)\r\n if rope_scaling_type is None or rope_scaling_type not in [\"linear\", \"dynamic\"]:\r\n raise ValueError(\r\n f\"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}\"\r\n )\r\n if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:\r\n raise ValueError(f\"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}\")" } ]
import argparse import json import os import torch import numpy as np import wandb from safetensors import safe_open from accelerate import Accelerator from accelerate.utils import set_seed from model.cnets import Model from model.configs import EConfig from typing import Any, Dict, List from torch import nn, optim from torch.utils.data import Dataset, DataLoader from tqdm import tqdm from transformers import get_linear_schedule_with_warmup, AutoConfig
9,190
batch_input_ids = torch.cat([self.paddingtensor2D(item['input_ids'], max_length) for item in features]) batch_hidden_states = torch.cat([self.paddingtensor(item['hidden_state_big'], max_length) for item in features]) batch_target = torch.cat([self.paddingtensor(item['target'], max_length) for item in features]) batch_loss_mask = torch.tensor( [item['loss_mask'] + [0] * (max_length - len(item['loss_mask'])) for item in features]) batch_attention_mask = torch.tensor( [item['attention_mask'] + [0] * (max_length - len(item['attention_mask'])) for item in features]) # batch_loss_mask = torch.ones_like(batch_loss_mask) # batch_attention_mask=torch.ones_like(batch_attention_mask) batch = { "input_ids": batch_input_ids, "hidden_states": batch_hidden_states, "target": batch_target, "attention_mask": batch_attention_mask, "loss_mask": batch_loss_mask, } return batch def top_accuracy(output, target, topk=(1,)): # output.shape (bs, num_classes), target.shape (bs, ) """Computes the accuracy over the k top predictions for the specified values of k""" with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) res.append(correct_k) return res @torch.no_grad() def getkacc(model, data, head, max_length=5): hidden_states = data["hidden_states"] input_ids = data["input_ids"] # attention_mask=data["attention_mask"] loss_mask = data["loss_mask"] # sample_mask=data["sample_mask"] target = data["target"] total = [0 for _ in range(max_length)] correct = [0 for _ in range(max_length)] bs, sl = hidden_states.shape[0], hidden_states.shape[1] target_headout = head(target) hidden_states_headout = head(hidden_states) for i in range(bs): for j in range(sl): single_hidden_states = hidden_states[i, :j] single_input_ids = input_ids[i, :j] single_hidden_states = single_hidden_states[None, :, :] single_input_ids = single_input_ids[None, :] for k in range(max_length): if loss_mask[i, single_hidden_states.shape[1] - 1] == 0: break tmp_in_target_headout = hidden_states_headout[i, single_hidden_states.shape[1] - 1] tmp_out_target_headout = target_headout[i, single_hidden_states.shape[1] - 1] target_in_token = torch.argmax(tmp_in_target_headout) target_out_token = torch.argmax(tmp_out_target_headout) tmp_token = input_ids[i, single_hidden_states.shape[1] - 1] # tmp_sample_mask=sample_mask[i,single_hidden_states.shape[1]-1] if not (target_in_token == tmp_token): break out_hidden = model(single_hidden_states, input_ids=single_input_ids) last_hidden = out_hidden[:, -1] last_headout = head(last_hidden) token = torch.argmax(last_headout) total[k] += 1 if token == target_out_token: correct[k] += 1 else: for kk in range(k + 1, max_length): total[kk] += 1 break single_hidden_states = torch.cat((single_hidden_states, out_hidden[:, -1:]), dim=1) single_input_ids = torch.cat((single_input_ids, torch.tensor([[token]]).to(single_input_ids.device)), dim=1) acc = [correct[i] / total[i] for i in range(len(correct))] return acc if train_config["data_noise"]: if train_config["noise"] == "uniform": aug = AddUniformNoise(std=train_config["std"]) else: aug = AddGaussianNoise(mean=train_config["mean"], std=train_config["std"]) else: aug = None datapath = list_files(train_config["datapath"]) traindatapath = datapath[:int(len(datapath) * 0.95)] testdatapath = datapath[int(len(datapath) * 0.95):] # print('td',train_config["datapath"]) # 
print(datapath) # exit() traindataset = CustomDataset(traindatapath, transform=aug) testdataset = CustomDataset(testdatapath) train_loader = DataLoader(traindataset, batch_size=train_config["bs"], shuffle=True, collate_fn=DataCollatorWithPadding(), num_workers=train_config["num_workers"], pin_memory=True) test_loader = DataLoader(testdataset, batch_size=train_config["bs"], shuffle=False, collate_fn=DataCollatorWithPadding(), num_workers=train_config["num_workers"], pin_memory=True) # for batch_data in train_loader: # print(batch_data) if accelerator.is_main_process: if not os.path.exists(args.cpdir): os.makedirs(args.cpdir) config = EConfig.from_pretrained(train_config["config_path"])
parser = argparse.ArgumentParser(description='sp') parser.add_argument('--basepath', type=str, default='/home/lyh/weights/hf/vicuna_v13/7B/') parser.add_argument('--configpath', type=str, default="config.json") parser.add_argument('--lr', type=float, default=3e-5) parser.add_argument('--bs', type=int, default=4) parser.add_argument('--gradient-accumulation-steps', type=int, default=8) parser.add_argument('--tmpdir', type=str, default='0') parser.add_argument('--outdir', type=str, default='0') parser.add_argument('--cpdir', type=str, default='0') args = parser.parse_args() train_config = { "lr": args.lr, "bs": args.bs, "gradient_accumulation_steps": args.gradient_accumulation_steps, "datapath": f"{args.tmpdir}", "is_warmup": True, "num_epochs": 20, # Depending on your data and model size, the larger the model, the higher the sample efficiency. We recommend setting it between 20-40. "num_warmup_steps": 2000, "total_steps": 800000, "p_w": 0.1, "v_w": 1.0, "head_w": 0.1, "num_workers": 2, "embeding": True, "act": "No", "data_noise": True, "noise": "uniform", "mean": 0.0, "std": 0.2, "residual": "true,norm", "max_len": 2048, # During training, truncating the training sequences means that the larger the setting, the more training data is used, and the better the effect, but it also consumes more VRAM. "config_path": args.configpath, "b1": 0.9, "b2": 0.95, "grad_clip": 0.5, "save_freq": 5 } # from transformers import AutoModelForCausalLM, AutoTokenizer,AutoModelForSequenceClassification # os.environ["CUDA_VISIBLE_DEVICES"] = "0,1" torch.backends.cuda.matmul.allow_tf32 = True set_seed(0) accelerator = Accelerator(mixed_precision='bf16', gradient_accumulation_steps=train_config["gradient_accumulation_steps"]) # import accelerate if accelerator.is_main_process: wandb.init(project="ess", entity="yuhui-li", config=train_config) baseconfig = AutoConfig.from_pretrained(args.basepath) head = torch.nn.Linear(baseconfig.hidden_size, baseconfig.vocab_size, bias=False) try: with open(os.path.join(args.basepath, "model.safetensors.index.json"), "r") as f: index_json = json.loads(f.read()) head_path = index_json["weight_map"]["lm_head.weight"] with safe_open(os.path.join(args.basepath, head_path), framework="pt", device="cpu") as f: tensor_slice = f.get_slice("lm_head.weight") vocab_size, hidden_dim = tensor_slice.get_shape() tensor = tensor_slice[:, :hidden_dim].float() except: with open(os.path.join(args.basepath, "pytorch_model.bin.index.json"), "r") as f: index_json = json.loads(f.read()) head_path = index_json["weight_map"]["lm_head.weight"] weights = torch.load(os.path.join(args.basepath, head_path)) tensor = weights["lm_head.weight"].float() head.weight.data = tensor head.eval() for param in head.parameters(): param.requires_grad = False def list_files(path): datapath = [] for root, directories, files in os.walk(path): for file in files: file_path = os.path.join(root, file) datapath.append(file_path) return datapath class AddGaussianNoise: def __init__(self, mean=0.0, std=0.0): self.mean = mean self.std = std def __call__(self, data): tensor = data["hidden_state_big"] noise = torch.randn(tensor.size()) * self.std + self.mean noisy_tensor = tensor + noise data["hidden_state_big"] = noisy_tensor return data class AddUniformNoise: def __init__(self, std=0.0): self.std = std def __call__(self, data): tensor = data["hidden_state_big"] noise = (torch.rand_like(tensor) - 0.5) * self.std * 512 / tensor.shape[1] noisy_tensor = tensor + noise data["hidden_state_big"] = noisy_tensor return data class 
CustomDataset(Dataset): def __init__(self, datapath, transform=None): self.data = datapath self.transform = transform def __len__(self): return len(self.data) def __getitem__(self, index): # try: data = torch.load(self.data[index]) new_data = {} hidden_state = data['hidden_state'][:train_config["max_len"]][None, :] input_ids = data['input_ids'][:train_config["max_len"]][None, :] loss_mask = data["loss_mask"][:train_config["max_len"]][None, :] # except: # with open("error_path.txt", "w") as file: # file.write(self.data[index]) # print('error path',self.data[index]) length = hidden_state.shape[1] # length_q = data['query_ids'].shape[1] attention_mask = [1] * length loss_mask = loss_mask[0].tolist() loss_mask[-1] = 0 input_ids_target = input_ids[:, 1:] zeropadding = torch.tensor([[0]]) input_ids_target = torch.cat((input_ids_target, zeropadding), dim=1) target = hidden_state[:, 1:, :] zeropadding = torch.zeros(1, 1, target.shape[2]) target = torch.cat((target, zeropadding), dim=1) loss_mask[-1] = 0 new_data["attention_mask"] = attention_mask new_data["loss_mask"] = loss_mask new_data["target"] = target new_data["hidden_state_big"] = hidden_state new_data["input_ids"] = input_ids_target # sample = torch.cat((data['xs'],data['xb'])) # sample=torch.cat((self.data[index]['x'],self.data[index]['logits'])) # label = data['y'] if self.transform: new_data = self.transform(new_data) return new_data class DataCollatorWithPadding: def paddingtensor(self, intensors, N): B, n, S = intensors.shape # padding_tensor = torch.zeros(B, N - n, S,dtype=intensors.dtype) padding_tensor = torch.zeros(B, N - n, S) outtensors = torch.cat((intensors, padding_tensor), dim=1) return outtensors def paddingtensor2D(self, intensors, N): B, n = intensors.shape padding_tensor = torch.zeros(B, N - n, dtype=intensors.dtype) outtensors = torch.cat((intensors, padding_tensor), dim=1) return outtensors def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]: max_length = max(item['hidden_state_big'].shape[1] for item in features) batch_input_ids = torch.cat([self.paddingtensor2D(item['input_ids'], max_length) for item in features]) batch_hidden_states = torch.cat([self.paddingtensor(item['hidden_state_big'], max_length) for item in features]) batch_target = torch.cat([self.paddingtensor(item['target'], max_length) for item in features]) batch_loss_mask = torch.tensor( [item['loss_mask'] + [0] * (max_length - len(item['loss_mask'])) for item in features]) batch_attention_mask = torch.tensor( [item['attention_mask'] + [0] * (max_length - len(item['attention_mask'])) for item in features]) # batch_loss_mask = torch.ones_like(batch_loss_mask) # batch_attention_mask=torch.ones_like(batch_attention_mask) batch = { "input_ids": batch_input_ids, "hidden_states": batch_hidden_states, "target": batch_target, "attention_mask": batch_attention_mask, "loss_mask": batch_loss_mask, } return batch def top_accuracy(output, target, topk=(1,)): # output.shape (bs, num_classes), target.shape (bs, ) """Computes the accuracy over the k top predictions for the specified values of k""" with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) res.append(correct_k) return res @torch.no_grad() def getkacc(model, data, head, max_length=5): hidden_states = data["hidden_states"] input_ids = data["input_ids"] # 
attention_mask=data["attention_mask"] loss_mask = data["loss_mask"] # sample_mask=data["sample_mask"] target = data["target"] total = [0 for _ in range(max_length)] correct = [0 for _ in range(max_length)] bs, sl = hidden_states.shape[0], hidden_states.shape[1] target_headout = head(target) hidden_states_headout = head(hidden_states) for i in range(bs): for j in range(sl): single_hidden_states = hidden_states[i, :j] single_input_ids = input_ids[i, :j] single_hidden_states = single_hidden_states[None, :, :] single_input_ids = single_input_ids[None, :] for k in range(max_length): if loss_mask[i, single_hidden_states.shape[1] - 1] == 0: break tmp_in_target_headout = hidden_states_headout[i, single_hidden_states.shape[1] - 1] tmp_out_target_headout = target_headout[i, single_hidden_states.shape[1] - 1] target_in_token = torch.argmax(tmp_in_target_headout) target_out_token = torch.argmax(tmp_out_target_headout) tmp_token = input_ids[i, single_hidden_states.shape[1] - 1] # tmp_sample_mask=sample_mask[i,single_hidden_states.shape[1]-1] if not (target_in_token == tmp_token): break out_hidden = model(single_hidden_states, input_ids=single_input_ids) last_hidden = out_hidden[:, -1] last_headout = head(last_hidden) token = torch.argmax(last_headout) total[k] += 1 if token == target_out_token: correct[k] += 1 else: for kk in range(k + 1, max_length): total[kk] += 1 break single_hidden_states = torch.cat((single_hidden_states, out_hidden[:, -1:]), dim=1) single_input_ids = torch.cat((single_input_ids, torch.tensor([[token]]).to(single_input_ids.device)), dim=1) acc = [correct[i] / total[i] for i in range(len(correct))] return acc if train_config["data_noise"]: if train_config["noise"] == "uniform": aug = AddUniformNoise(std=train_config["std"]) else: aug = AddGaussianNoise(mean=train_config["mean"], std=train_config["std"]) else: aug = None datapath = list_files(train_config["datapath"]) traindatapath = datapath[:int(len(datapath) * 0.95)] testdatapath = datapath[int(len(datapath) * 0.95):] # print('td',train_config["datapath"]) # print(datapath) # exit() traindataset = CustomDataset(traindatapath, transform=aug) testdataset = CustomDataset(testdatapath) train_loader = DataLoader(traindataset, batch_size=train_config["bs"], shuffle=True, collate_fn=DataCollatorWithPadding(), num_workers=train_config["num_workers"], pin_memory=True) test_loader = DataLoader(testdataset, batch_size=train_config["bs"], shuffle=False, collate_fn=DataCollatorWithPadding(), num_workers=train_config["num_workers"], pin_memory=True) # for batch_data in train_loader: # print(batch_data) if accelerator.is_main_process: if not os.path.exists(args.cpdir): os.makedirs(args.cpdir) config = EConfig.from_pretrained(train_config["config_path"])
model = Model(config, load_emb=True, path=args.basepath)
0
2023-12-07 19:08:39+00:00
12k
zju3dv/EasyVolcap
easyvolcap/engine/registry.py
[ { "identifier": "deprecated_api_warning", "path": "easyvolcap/engine/misc.py", "snippet": "def deprecated_api_warning(name_dict, cls_name=None):\n \"\"\"A decorator to check if some arguments are deprecate and try to replace\n deprecate src_arg_name to dst_arg_name.\n Args:\n name_dict(dict):\n key (str): Deprecate argument names.\n val (str): Expected argument names.\n Returns:\n func: New function.\n \"\"\"\n\n def api_warning_wrapper(old_func):\n\n @functools.wraps(old_func)\n def new_func(*args, **kwargs):\n # get the arg spec of the decorated method\n args_info = getfullargspec(old_func)\n # get name of the function\n func_name = old_func.__name__\n if cls_name is not None:\n func_name = f'{cls_name}.{func_name}'\n if args:\n arg_names = args_info.args[:len(args)]\n for src_arg_name, dst_arg_name in name_dict.items():\n if src_arg_name in arg_names:\n warnings.warn(\n f'\"{src_arg_name}\" is deprecated in '\n f'`{func_name}`, please use \"{dst_arg_name}\" '\n 'instead', DeprecationWarning)\n arg_names[arg_names.index(src_arg_name)] = dst_arg_name\n if kwargs:\n for src_arg_name, dst_arg_name in name_dict.items():\n if src_arg_name in kwargs:\n\n assert dst_arg_name not in kwargs, (\n f'The expected behavior is to replace '\n f'the deprecated key `{src_arg_name}` to '\n f'new key `{dst_arg_name}`, but got them '\n f'in the arguments at the same time, which '\n f'is confusing. `{src_arg_name} will be '\n f'deprecated in the future, please '\n f'use `{dst_arg_name}` instead.')\n\n warnings.warn(\n f'\"{src_arg_name}\" is deprecated in '\n f'`{func_name}`, please use \"{dst_arg_name}\" '\n 'instead', DeprecationWarning)\n kwargs[dst_arg_name] = kwargs.pop(src_arg_name)\n\n # apply converted arguments to the decorated method\n output = old_func(*args, **kwargs)\n return output\n\n return new_func\n\n return api_warning_wrapper" }, { "identifier": "is_seq_of", "path": "easyvolcap/engine/misc.py", "snippet": "def is_seq_of(seq, expected_type, seq_type=None):\n \"\"\"Check whether it is a sequence of some type.\n Args:\n seq (Sequence): The sequence to be checked.\n expected_type (type): Expected type of sequence items.\n seq_type (type, optional): Expected sequence type.\n Returns:\n bool: Whether the sequence is valid.\n \"\"\"\n if seq_type is None:\n exp_seq_type = abc.Sequence\n else:\n assert isinstance(seq_type, type)\n exp_seq_type = seq_type\n if not isinstance(seq, exp_seq_type):\n return False\n for item in seq:\n if not isinstance(item, expected_type):\n return False\n return True" }, { "identifier": "Config", "path": "easyvolcap/engine/config.py", "snippet": "class Config:\n \"\"\"A facility for config and config files.\n\n It supports common file formats as configs: python/json/yaml. 
The interface\n is the same as a dict object and also allows access config values as\n attributes.\n\n Example:\n >>> cfg = Config(dict(a=1, b=dict(b1=[0, 1])))\n >>> cfg.a\n 1\n >>> cfg.b\n {'b1': [0, 1]}\n >>> cfg.b.b1\n [0, 1]\n >>> cfg = Config.fromfile('tests/data/config/a.py')\n >>> cfg.filename\n \"/home/kchen/projects/mmcv/tests/data/config/a.py\"\n >>> cfg.item4\n 'test'\n >>> cfg\n \"Config [path: /home/kchen/projects/mmcv/tests/data/config/a.py]: \"\n \"{'item1': [1, 2], 'item2': {'a': 0}, 'item3': True, 'item4': 'test'}\"\n \"\"\"\n\n @staticmethod\n def _validate_py_syntax(filename):\n with open(filename, encoding='utf-8') as f:\n # Setting encoding explicitly to resolve coding issue on windows\n content = f.read()\n try:\n ast.parse(content)\n except SyntaxError as e:\n raise SyntaxError('There are syntax errors in config '\n f'file {filename}: {e}')\n\n @staticmethod\n def _substitute_predefined_vars(filename, temp_config_name):\n file_dirname = osp.dirname(filename)\n file_basename = osp.basename(filename)\n file_basename_no_extension = osp.splitext(file_basename)[0]\n file_extname = osp.splitext(filename)[1]\n support_templates = dict(\n fileDirname=file_dirname,\n fileBasename=file_basename,\n fileBasenameNoExtension=file_basename_no_extension,\n fileExtname=file_extname)\n with open(filename, encoding='utf-8') as f:\n # Setting encoding explicitly to resolve coding issue on windows\n config_file = f.read()\n for key, value in support_templates.items():\n regexp = r'\\{\\{\\s*' + str(key) + r'\\s*\\}\\}'\n value = value.replace('\\\\', '/')\n config_file = re.sub(regexp, value, config_file)\n with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file:\n tmp_config_file.write(config_file)\n\n @staticmethod\n def _pre_substitute_base_vars(filename, temp_config_name):\n \"\"\"Substitute base variable placehoders to string, so that parsing\n would work.\"\"\"\n with open(filename, encoding='utf-8') as f:\n # Setting encoding explicitly to resolve coding issue on windows\n config_file = f.read()\n base_var_dict = {}\n regexp = r'\\{\\{\\s*' + BASE_KEY + r'\\.([\\w\\.]+)\\s*\\}\\}'\n base_vars = set(re.findall(regexp, config_file))\n for base_var in base_vars:\n randstr = f'_{base_var}_{uuid.uuid4().hex.lower()[:6]}'\n base_var_dict[randstr] = base_var\n regexp = r'\\{\\{\\s*' + BASE_KEY + r'\\.' 
+ base_var + r'\\s*\\}\\}'\n config_file = re.sub(regexp, f'\"{randstr}\"', config_file)\n with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file:\n tmp_config_file.write(config_file)\n return base_var_dict\n\n @staticmethod\n def _substitute_base_vars(cfg, base_var_dict, base_cfg):\n \"\"\"Substitute variable strings to their actual values.\"\"\"\n cfg = copy.deepcopy(cfg)\n\n if isinstance(cfg, dict):\n for k, v in cfg.items():\n if isinstance(v, str) and v in base_var_dict:\n new_v = base_cfg\n for new_k in base_var_dict[v].split('.'):\n new_v = new_v[new_k]\n cfg[k] = new_v\n elif isinstance(v, (list, tuple, dict)):\n cfg[k] = Config._substitute_base_vars(\n v, base_var_dict, base_cfg)\n elif isinstance(cfg, tuple):\n cfg = tuple(\n Config._substitute_base_vars(c, base_var_dict, base_cfg)\n for c in cfg)\n elif isinstance(cfg, list):\n cfg = [\n Config._substitute_base_vars(c, base_var_dict, base_cfg)\n for c in cfg\n ]\n elif isinstance(cfg, str) and cfg in base_var_dict:\n new_v = base_cfg\n for new_k in base_var_dict[cfg].split('.'):\n new_v = new_v[new_k]\n cfg = new_v\n\n return cfg\n\n @staticmethod\n def _file2dict(filename, use_predefined_variables=True, extra_base_cfg_dict={}):\n filename = osp.abspath(osp.expanduser(filename))\n check_file_exist(filename)\n fileExtname = osp.splitext(filename)[1]\n if fileExtname not in ['.py', '.json', '.yaml', '.yml']:\n raise OSError('Only py/yml/yaml/json type are supported now!')\n\n with tempfile.TemporaryDirectory() as temp_config_dir:\n temp_config_file = tempfile.NamedTemporaryFile(\n dir=temp_config_dir, suffix=fileExtname)\n if platform.system() == 'Windows':\n temp_config_file.close()\n temp_config_name = osp.basename(temp_config_file.name)\n # Substitute predefined variables\n if use_predefined_variables:\n Config._substitute_predefined_vars(filename, temp_config_file.name)\n else:\n shutil.copyfile(filename, temp_config_file.name)\n # Substitute base variables from placeholders to strings\n base_var_dict = Config._pre_substitute_base_vars(\n temp_config_file.name, temp_config_file.name)\n\n if filename.endswith('.py'):\n temp_module_name = osp.splitext(temp_config_name)[0]\n sys.path.insert(0, temp_config_dir)\n Config._validate_py_syntax(filename)\n mod = import_module(temp_module_name)\n sys.path.pop(0)\n cfg_dict = {\n name: value\n for name, value in mod.__dict__.items()\n if not name.startswith('__')\n and not isinstance(value, types.ModuleType)\n and not isinstance(value, types.FunctionType)\n }\n # delete imported module\n del sys.modules[temp_module_name]\n elif filename.endswith(('.yml', '.yaml', '.json')):\n from . 
import io\n cfg_dict = io.load(temp_config_file.name)\n # close temp file\n temp_config_file.close()\n\n # check deprecation information\n if DEPRECATION_KEY in cfg_dict:\n deprecation_info = cfg_dict.pop(DEPRECATION_KEY)\n warning_msg = f'The config file {filename} will be deprecated ' \\\n 'in the future.'\n if 'expected' in deprecation_info:\n warning_msg += f' Please use {deprecation_info[\"expected\"]} ' \\\n 'instead.'\n if 'reference' in deprecation_info:\n warning_msg += ' More information can be found at ' \\\n f'{deprecation_info[\"reference\"]}'\n warnings.warn(warning_msg, DeprecationWarning)\n\n cfg_text = filename + '\\n'\n with open(filename, encoding='utf-8') as f:\n # Setting encoding explicitly to resolve coding issue on windows\n cfg_text += f.read()\n\n # Substitute base variables from strings to their actual values\n if BASE_KEY in cfg_dict:\n cfg_dir = osp.dirname(filename)\n base_filename = cfg_dict.pop(BASE_KEY)\n base_filename = base_filename if isinstance(base_filename, list) else [base_filename]\n base_cfg_dict = {}\n\n for f in base_filename:\n # NOTE: easyvolcap: use project-wise relative path for configuration?\n if os.path.exists(osp.join(cfg_dir, f)):\n f = osp.join(cfg_dir, f)\n\n # Load base config file with already loaded config\n _cfg_dict, _cfg_text = Config._file2dict(f, extra_base_cfg_dict=Config._merge_a_into_b(base_cfg_dict, deepcopy(extra_base_cfg_dict)))\n\n # Merge base into current base\n base_cfg_dict = Config._merge_a_into_b(_cfg_dict, base_cfg_dict) # merge next base into previous base, and then merge all base into current\n\n # Merge cfg_text\n cfg_text += _cfg_text\n\n cfg_dict = Config._merge_a_into_b(cfg_dict, base_cfg_dict)\n\n # Substitute base variables from strings to their actual values\n cfg_dict = Config._substitute_base_vars(cfg_dict, base_var_dict, Config._merge_a_into_b(base_cfg_dict, deepcopy(extra_base_cfg_dict)))\n else:\n cfg_dict = Config._substitute_base_vars(cfg_dict, base_var_dict, extra_base_cfg_dict)\n\n return cfg_dict, cfg_text\n\n @staticmethod\n def _merge_a_into_b(a, b, allow_list_keys=True):\n \"\"\"merge dict ``a`` into dict ``b`` (non-inplace).\n\n Values in ``a`` will overwrite ``b``. ``b`` is copied first to avoid\n in-place modifications.\n\n Args:\n a (dict): The source dict to be merged into ``b``.\n b (dict): The origin dict to be fetch keys from ``a``.\n allow_list_keys (bool): If True, int string keys (e.g. '0', '1')\n are allowed in source ``a`` and will replace the element of the\n corresponding index in b if b is a list. Default: False.\n\n Returns:\n dict: The modified dict of ``b`` using ``a``.\n\n Examples:\n # Normally merge a into b.\n >>> Config._merge_a_into_b(\n ... dict(obj=dict(a=2)), dict(obj=dict(a=1)))\n {'obj': {'a': 2}}\n\n # Delete b first and merge a into b.\n >>> Config._merge_a_into_b(\n ... dict(obj=dict(_delete_=True, a=2)), dict(obj=dict(a=1)))\n {'obj': {'a': 2}}\n\n # b is a list\n >>> Config._merge_a_into_b(\n ... 
{'0': dict(a=2)}, [dict(a=1), dict(b=2)], True)\n [{'a': 2}, {'b': 2}]\n \"\"\"\n b = b.copy()\n if BASE_KEY in a:\n # raise KeyError(f'`{BASE_KEY}` is not allowed in child config.')\n cfg_dict = a\n\n # TODO: Reuse implementation of loading base keys\n base_filename = cfg_dict.pop(BASE_KEY)\n base_filename = base_filename if isinstance(base_filename, list) else [base_filename]\n\n cfg_dict_list = []\n for f in base_filename:\n # NOTE: easyvolcap: use project-wise relative path for configuration?\n _cfg_dict, _cfg_text = Config._file2dict(f, extra_base_cfg_dict=b) # allow project level path\n cfg_dict_list.append(_cfg_dict)\n\n base_cfg_dict = dict()\n for c in cfg_dict_list:\n base_cfg_dict = Config._merge_a_into_b(c, base_cfg_dict)\n\n cfg_dict = Config._merge_a_into_b(cfg_dict, base_cfg_dict)\n\n a = cfg_dict\n\n for k, v in a.items():\n if allow_list_keys and k.isdigit() and isinstance(b, list):\n k = int(k)\n if k == len(b): # MARK: Assuming all the keys come in sorted order to be inserted\n b.append(v)\n elif k > len(b):\n raise KeyError(f'Index {k} exceeds the length of list {b}')\n # b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys)\n if k == APPEND_KEY and isinstance(b, list):\n b.append(v) # just append this value or dict (no recursion)\n elif isinstance(v, dict):\n delete_base = v.pop(DELETE_KEY, False)\n if ((k in b) or (isinstance(b, list) and k <= len(b))) and (not delete_base): # dict? list? not delete?\n allowed_types = (dict, list) if allow_list_keys else dict\n if not isinstance(b[k], allowed_types):\n raise TypeError(\n f'{k}={v} in child config cannot inherit from '\n f'base because {k} is a dict in the child config '\n f'but is of type {type(b[k])} in base config. '\n f'You may set `{DELETE_KEY}=True` to ignore the '\n f'base config.')\n b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys)\n else:\n b[k] = ConfigDict(v)\n else:\n b[k] = v\n return b\n\n @staticmethod\n def fromfile(filename,\n use_predefined_variables=True,\n import_custom_modules=True):\n if isinstance(filename, Path):\n filename = str(filename)\n cfg_dict, cfg_text = Config._file2dict(filename,\n use_predefined_variables)\n if import_custom_modules and cfg_dict.get('custom_imports', None):\n import_modules_from_strings(**cfg_dict['custom_imports'])\n return Config(cfg_dict, cfg_text=cfg_text, filename=filename)\n\n @staticmethod\n def fromstring(cfg_str, file_format):\n \"\"\"Generate config from config str.\n\n Args:\n cfg_str (str): Config str.\n file_format (str): Config file format corresponding to the\n config str. 
Only py/yml/yaml/json type are supported now!\n\n Returns:\n :obj:`Config`: Config obj.\n \"\"\"\n if file_format not in ['.py', '.json', '.yaml', '.yml']:\n raise OSError('Only py/yml/yaml/json type are supported now!')\n if file_format != '.py' and 'dict(' in cfg_str:\n # check if users specify a wrong suffix for python\n warnings.warn(\n 'Please check \"file_format\", the file format may be .py')\n with tempfile.NamedTemporaryFile(\n 'w', encoding='utf-8', suffix=file_format,\n delete=False) as temp_file:\n temp_file.write(cfg_str)\n # on windows, previous implementation cause error\n # see PR 1077 for details\n cfg = Config.fromfile(temp_file.name)\n os.remove(temp_file.name)\n return cfg\n\n @staticmethod\n def auto_argparser(description=None):\n \"\"\"Generate argparser from config file automatically (experimental)\"\"\"\n partial_parser = ArgumentParser(description=description)\n partial_parser.add_argument('config', help='config file path')\n cfg_file = partial_parser.parse_known_args()[0].config\n cfg = Config.fromfile(cfg_file)\n parser = ArgumentParser(description=description)\n parser.add_argument('config', help='config file path')\n add_args(parser, cfg)\n return parser, cfg\n\n def __init__(self, cfg_dict=None, cfg_text=None, filename=None, **kwargs):\n if cfg_dict is None:\n cfg_dict = dict(kwargs)\n elif not isinstance(cfg_dict, dict):\n raise TypeError('cfg_dict must be a dict, but '\n f'got {type(cfg_dict)}')\n for key in cfg_dict:\n if key in RESERVED_KEYS:\n raise KeyError(f'{key} is reserved for config file')\n\n if isinstance(filename, Path):\n filename = str(filename)\n\n super().__setattr__('_cfg_dict', ConfigDict(cfg_dict))\n super().__setattr__('_filename', filename)\n if cfg_text:\n text = cfg_text\n elif filename:\n with open(filename) as f:\n text = f.read()\n else:\n text = ''\n super().__setattr__('_text', text)\n\n @property\n def filename(self):\n return self._filename\n\n @property\n def text(self):\n return self._text\n\n @property\n def pretty_text(self):\n\n indent = 4\n\n def _indent(s_, num_spaces):\n s = s_.split('\\n')\n if len(s) == 1:\n return s_\n first = s.pop(0)\n s = [(num_spaces * ' ') + line for line in s]\n s = '\\n'.join(s)\n s = first + '\\n' + s\n return s\n\n def _format_basic_types(k, v, use_mapping=False):\n if isinstance(v, str):\n v_str = f\"'{v}'\"\n else:\n v_str = str(v)\n\n if use_mapping:\n k_str = f\"'{k}'\" if isinstance(k, str) else str(k)\n attr_str = f'{k_str}: {v_str}'\n else:\n attr_str = f'{str(k)}={v_str}'\n attr_str = _indent(attr_str, indent)\n\n return attr_str\n\n def _format_list(k, v, use_mapping=False):\n # check if all items in the list are dict\n if all(isinstance(_, dict) for _ in v):\n v_str = '[\\n'\n v_str += '\\n'.join(\n f'dict({_indent(_format_dict(v_), indent)}),'\n for v_ in v).rstrip(',')\n if use_mapping:\n k_str = f\"'{k}'\" if isinstance(k, str) else str(k)\n attr_str = f'{k_str}: {v_str}'\n else:\n attr_str = f'{str(k)}={v_str}'\n attr_str = _indent(attr_str, indent) + ']'\n else:\n attr_str = _format_basic_types(k, v, use_mapping)\n return attr_str\n\n def _contain_invalid_identifier(dict_str):\n contain_invalid_identifier = False\n for key_name in dict_str:\n contain_invalid_identifier |= \\\n (not str(key_name).isidentifier())\n return contain_invalid_identifier\n\n def _format_dict(input_dict, outest_level=False):\n r = ''\n s = []\n\n use_mapping = _contain_invalid_identifier(input_dict)\n if use_mapping:\n r += '{'\n for idx, (k, v) in enumerate(input_dict.items()):\n is_last = idx >= 
len(input_dict) - 1\n end = '' if outest_level or is_last else ','\n if isinstance(v, dict):\n v_str = '\\n' + _format_dict(v)\n if use_mapping:\n k_str = f\"'{k}'\" if isinstance(k, str) else str(k)\n attr_str = f'{k_str}: dict({v_str}'\n else:\n attr_str = f'{str(k)}=dict({v_str}'\n attr_str = _indent(attr_str, indent) + ')' + end\n elif isinstance(v, list):\n attr_str = _format_list(k, v, use_mapping) + end\n else:\n attr_str = _format_basic_types(k, v, use_mapping) + end\n\n s.append(attr_str)\n r += '\\n'.join(s)\n if use_mapping:\n r += '}'\n return r\n\n cfg_dict = self._cfg_dict.to_dict()\n text = _format_dict(cfg_dict, outest_level=True)\n # copied from setup.cfg\n yapf_style = dict(\n based_on_style='pep8',\n blank_line_before_nested_class_or_def=True,\n split_before_expression_after_opening_paren=True)\n text, _ = FormatCode(text, style_config=yapf_style, verify=True)\n\n return text\n\n def __repr__(self):\n return f'Config (path: {self.filename}): {self._cfg_dict.__repr__()}'\n\n def __len__(self):\n return len(self._cfg_dict)\n\n def __getattr__(self, name):\n return getattr(self._cfg_dict, name)\n\n def __getitem__(self, name):\n return self._cfg_dict.__getitem__(name)\n\n def __setattr__(self, name, value):\n if isinstance(value, dict):\n value = ConfigDict(value)\n self._cfg_dict.__setattr__(name, value)\n\n def __setitem__(self, name, value):\n if isinstance(value, dict):\n value = ConfigDict(value)\n self._cfg_dict.__setitem__(name, value)\n\n def __iter__(self):\n return iter(self._cfg_dict)\n\n def __getstate__(self):\n return (self._cfg_dict, self._filename, self._text)\n\n def __copy__(self):\n cls = self.__class__\n other = cls.__new__(cls)\n other.__dict__.update(self.__dict__)\n\n return other\n\n def __deepcopy__(self, memo):\n cls = self.__class__\n other = cls.__new__(cls)\n memo[id(self)] = other\n\n for key, value in self.__dict__.items():\n super(Config, other).__setattr__(key, copy.deepcopy(value, memo))\n\n return other\n\n def __setstate__(self, state):\n _cfg_dict, _filename, _text = state\n super().__setattr__('_cfg_dict', _cfg_dict)\n super().__setattr__('_filename', _filename)\n super().__setattr__('_text', _text)\n\n def dump(self, file=None, **kwargs):\n \"\"\"Dumps config into a file or returns a string representation of the\n config.\n\n If a file argument is given, saves the config to that file using the\n format defined by the file argument extension.\n\n Otherwise, returns a string representing the config. The formatting of\n this returned string is defined by the extension of `self.filename`. If\n `self.filename` is not defined, returns a string representation of a\n dict (lowercased and using ' for strings).\n\n Examples:\n >>> cfg_dict = dict(item1=[1, 2], item2=dict(a=0),\n ... item3=True, item4='test')\n >>> cfg = Config(cfg_dict=cfg_dict)\n >>> dump_file = \"a.py\"\n >>> cfg.dump(dump_file)\n\n Args:\n file (str, optional): Path of the output file where the config\n will be dumped. Defaults to None.\n \"\"\"\n from . 
import io\n cfg_dict = super().__getattribute__('_cfg_dict').to_dict()\n if file is None:\n if self.filename is None or self.filename.endswith('.py'):\n return self.pretty_text\n else:\n file_format = self.filename.split('.')[-1]\n return io.dump(cfg_dict, file_format=file_format, **kwargs)\n elif file.endswith('.py'):\n with open(file, 'w', encoding='utf-8') as f:\n f.write(self.pretty_text)\n else:\n file_format = file.split('.')[-1]\n return io.dump(cfg_dict, file=file, file_format=file_format, **kwargs)\n\n def merge_from_dict(self, options, allow_list_keys=True):\n \"\"\"Merge list into cfg_dict.\n\n Merge the dict parsed by MultipleKVAction into this cfg.\n\n Examples:\n >>> options = {'model.backbone.depth': 50,\n ... 'model.backbone.with_cp':True}\n >>> cfg = Config(dict(model=dict(backbone=dict(type='ResNet'))))\n >>> cfg.merge_from_dict(options)\n >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict')\n >>> assert cfg_dict == dict(\n ... model=dict(backbone=dict(depth=50, with_cp=True)))\n\n >>> # Merge list element\n >>> cfg = Config(dict(pipeline=[\n ... dict(type='LoadImage'), dict(type='LoadAnnotations')]))\n >>> options = dict(pipeline={'0': dict(type='SelfLoadImage')})\n >>> cfg.merge_from_dict(options, allow_list_keys=True)\n >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict')\n >>> assert cfg_dict == dict(pipeline=[\n ... dict(type='SelfLoadImage'), dict(type='LoadAnnotations')])\n\n Args:\n options (dict): dict of configs to merge from.\n allow_list_keys (bool): If True, int string keys (e.g. '0', '1')\n are allowed in ``options`` and will replace the element of the\n corresponding index in the config if the config is a list.\n Default: True.\n \"\"\"\n option_cfg_dict = {}\n for full_key, v in options.items():\n d = option_cfg_dict\n key_list = full_key.split('.')\n for subkey in key_list[:-1]:\n d.setdefault(subkey, ConfigDict())\n d = d[subkey]\n subkey = key_list[-1]\n d[subkey] = v\n\n cfg_dict = super().__getattribute__('_cfg_dict')\n super().__setattr__(\n '_cfg_dict',\n Config._merge_a_into_b(option_cfg_dict, cfg_dict, allow_list_keys=allow_list_keys))" }, { "identifier": "BASE_KEY", "path": "easyvolcap/engine/config.py", "snippet": "BASE_KEY = 'configs'" }, { "identifier": "DELETE_KEY", "path": "easyvolcap/engine/config.py", "snippet": "DELETE_KEY = '_delete_'" }, { "identifier": "APPEND_KEY", "path": "easyvolcap/engine/config.py", "snippet": "APPEND_KEY = '_append_' # append stuff at end of existing list" }, { "identifier": "DEPRECATION_KEY", "path": "easyvolcap/engine/config.py", "snippet": "DEPRECATION_KEY = '_deprecation_'" }, { "identifier": "dotdict", "path": "easyvolcap/utils/base_utils.py", "snippet": "class dotdict(dict, Dict[KT, VT]):\n \"\"\"\n This is the default data passing object used throughout the codebase\n Main function: dot access for dict values & dict like merging and updates\n\n a dictionary that supports dot notation \n as well as dictionary access notation \n usage: d = make_dotdict() or d = make_dotdict{'val1':'first'})\n set attributes: d.val2 = 'second' or d['val2'] = 'second'\n get attributes: d.val2 or d['val2']\n \"\"\"\n\n def update(self, dct: Dict = None, **kwargs):\n dct = copy(dct) # avoid modifying the original dict, use super's copy to avoid recursion\n\n # Handle different arguments\n if dct is None:\n dct = kwargs\n elif isinstance(dct, Mapping):\n dct.update(kwargs)\n else:\n super().update(dct, **kwargs)\n return\n\n # Recursive updates\n for k, v in dct.items():\n if k in self:\n\n # Handle type 
conversions\n target_type = type(self[k])\n if not isinstance(v, target_type):\n # NOTE: bool('False') will be True\n if target_type == bool and isinstance(v, str):\n dct[k] = v == 'True'\n else:\n dct[k] = target_type(v)\n\n if isinstance(v, dict):\n self[k].update(v) # recursion from here\n else:\n self[k] = v\n else:\n if isinstance(v, dict):\n self[k] = dotdict(v) # recursion?\n else:\n self[k] = v\n return self\n\n def __init__(self, *args, **kwargs):\n self.update(*args, **kwargs)\n\n copy = return_dotdict(dict.copy)\n fromkeys = return_dotdict(dict.fromkeys)\n\n # def __hash__(self):\n # # return hash(''.join([str(self.values().__hash__())]))\n # return super(dotdict, self).__hash__()\n\n # def __init__(self, *args, **kwargs):\n # super(dotdict, self).__init__(*args, **kwargs)\n\n \"\"\"\n Uncomment following lines and \n comment out __getattr__ = dict.__getitem__ to get feature:\n \n returns empty numpy array for undefined keys, so that you can easily copy things around\n TODO: potential caveat, harder to trace where this is set to np.array([], dtype=np.float32)\n \"\"\"\n\n def __getitem__(self, key):\n try:\n return dict.__getitem__(self, key)\n except KeyError as e:\n raise AttributeError(e)\n # MARK: Might encounter exception in newer version of pytorch\n # Traceback (most recent call last):\n # File \"/home/xuzhen/miniconda3/envs/torch/lib/python3.9/multiprocessing/queues.py\", line 245, in _feed\n # obj = _ForkingPickler.dumps(obj)\n # File \"/home/xuzhen/miniconda3/envs/torch/lib/python3.9/multiprocessing/reduction.py\", line 51, in dumps\n # cls(buf, protocol).dump(obj)\n # KeyError: '__getstate__'\n # MARK: Because you allow your __getattr__() implementation to raise the wrong kind of exception.\n # FIXME: not working typing hinting code\n __getattr__: Callable[..., 'torch.Tensor'] = __getitem__ # type: ignore # overidden dict.__getitem__\n __getattribute__: Callable[..., 'torch.Tensor'] # type: ignore\n # __getattr__ = dict.__getitem__\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__\n\n # TODO: better ways to programmically define these special variables?\n\n @property\n def meta(self) -> dotdict:\n # Special variable used for storing cpu tensor in batch\n if 'meta' not in self:\n self.meta = dotdict()\n return self.__getitem__('meta')\n\n @meta.setter\n def meta(self, meta):\n self.__setitem__('meta', meta)\n\n @property\n def output(self) -> dotdict: # late annotation needed for this\n # Special entry for storing output tensor in batch\n if 'output' not in self:\n self.output = dotdict()\n return self.__getitem__('output')\n\n @output.setter\n def output(self, output):\n self.__setitem__('output', output)\n\n @property\n def persistent(self) -> dotdict: # late annotation needed for this\n # Special entry for storing persistent tensor in batch\n if 'persistent' not in self:\n self.persistent = dotdict()\n return self.__getitem__('persistent')\n\n @persistent.setter\n def persistent(self, persistent):\n self.__setitem__('persistent', persistent)\n\n @property\n def type(self) -> str: # late annotation needed for this\n # Special entry for type based construction system\n return self.__getitem__('type')\n\n @type.setter\n def type(self, type):\n self.__setitem__('type', type)\n\n def to_dict(self):\n out = dict()\n for k, v in self.items():\n if isinstance(v, dotdict):\n v = v.to_dict() # recursion point\n out[k] = v\n return out" } ]
import inspect
import warnings

from functools import partial
from typing import Any, Dict, Optional, Callable

from .misc import deprecated_api_warning, is_seq_of
from .config import Config, BASE_KEY, DELETE_KEY, APPEND_KEY, DEPRECATION_KEY
from easyvolcap.utils.base_utils import dotdict
from easyvolcap.utils.console_utils import *  # This is the actual entry point
8407
# Copyright (c) OpenMMLab. All rights reserved.

def get_func_args(func: Callable):
    signature = inspect.signature(func)
    return {
        k: v
        for k, v in signature.parameters.items()
        # if v.default is not inspect.Parameter.empty
    }


def call_from_cfg(func: Callable, cfg: dict, **kwargs):
    cfg = dotdict(cfg)
    cfg.update(kwargs)
    func_args = get_func_args(func)
    call_args = dotdict()
    for k, v in func_args.items():
        if v.kind == inspect.Parameter.VAR_KEYWORD:
            call_args = cfg
            break
    if not call_args:  # empty
        for k, v in cfg.items():
            if k not in func_args:
                # Maybe be quite about it
                # log(f'Unused arg:', line(k), '=', line(v), 'for', line(func))
                # FIXME: Easy for undebuggable bugs to slip through (typo in config keys)
# Copyright (c) OpenMMLab. All rights reserved.

def get_func_args(func: Callable):
    signature = inspect.signature(func)
    return {
        k: v
        for k, v in signature.parameters.items()
        # if v.default is not inspect.Parameter.empty
    }


def call_from_cfg(func: Callable, cfg: dict, **kwargs):
    cfg = dotdict(cfg)
    cfg.update(kwargs)
    func_args = get_func_args(func)
    call_args = dotdict()
    for k, v in func_args.items():
        if v.kind == inspect.Parameter.VAR_KEYWORD:
            call_args = cfg
            break
    if not call_args:  # empty
        for k, v in cfg.items():
            if k not in func_args:
                # Maybe be quite about it
                # log(f'Unused arg:', line(k), '=', line(v), 'for', line(func))
                # FIXME: Easy for undebuggable bugs to slip through (typo in config keys)
if k not in [BASE_KEY, DELETE_KEY, APPEND_KEY, DEPRECATION_KEY]:
4
2023-12-07 08:53:42+00:00
12k
alibaba/animate-anything
models/unet_3d_condition_mask.py
[ { "identifier": "CrossAttnDownBlock3D", "path": "models/unet_3d_blocks.py", "snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n downsample_padding=1,\n add_downsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n ):\n super().__init__()\n resnets = []\n attentions = []\n temp_attentions = []\n temp_convs = []\n\n self.gradient_checkpointing = False\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=0.1\n )\n )\n attentions.append(\n Transformer2DModel(\n out_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n out_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample2D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n def forward(\n self,\n hidden_states,\n temb=None,\n encoder_hidden_states=None,\n attention_mask=None,\n num_frames=1,\n cross_attention_kwargs=None,\n ):\n # TODO(Patrick, William) - attention mask is not used\n output_states = ()\n\n for resnet, temp_conv, attn, temp_attn in zip(\n self.resnets, self.temp_convs, self.attentions, self.temp_attentions\n ):\n \n if self.gradient_checkpointing:\n hidden_states = cross_attn_g_c(\n attn, \n temp_attn, \n resnet, \n temp_conv, \n hidden_states, \n encoder_hidden_states, \n cross_attention_kwargs, \n temb, \n num_frames,\n inverse_temp=True\n )\n else:\n hidden_states = resnet(hidden_states, temb)\n\n if num_frames > 1:\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n ).sample\n\n if num_frames > 1:\n hidden_states = temp_attn(hidden_states, num_frames=num_frames).sample\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in 
self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states" }, { "identifier": "CrossAttnUpBlock3D", "path": "models/unet_3d_blocks.py", "snippet": "class CrossAttnUpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n add_upsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n ):\n super().__init__()\n resnets = []\n temp_convs = []\n attentions = []\n temp_attentions = []\n\n self.gradient_checkpointing = False\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock2D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=0.1\n )\n )\n attentions.append(\n Transformer2DModel(\n out_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n out_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n def forward(\n self,\n hidden_states,\n res_hidden_states_tuple,\n temb=None,\n encoder_hidden_states=None,\n upsample_size=None,\n attention_mask=None,\n num_frames=1,\n cross_attention_kwargs=None,\n ):\n # TODO(Patrick, William) - attention mask is not used\n for resnet, temp_conv, attn, temp_attn in zip(\n self.resnets, self.temp_convs, self.attentions, self.temp_attentions\n ):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.gradient_checkpointing:\n hidden_states = cross_attn_g_c(\n attn, \n temp_attn, \n resnet, \n temp_conv, \n hidden_states, \n encoder_hidden_states, \n cross_attention_kwargs, \n temb, \n num_frames,\n inverse_temp=True\n )\n else:\n hidden_states = resnet(hidden_states, temb)\n\n if 
num_frames > 1:\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n ).sample\n\n if num_frames > 1:\n hidden_states = temp_attn(hidden_states, num_frames=num_frames).sample\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states" }, { "identifier": "DownBlock3D", "path": "models/unet_3d_blocks.py", "snippet": "class DownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_downsample=True,\n downsample_padding=1,\n ):\n super().__init__()\n resnets = []\n temp_convs = []\n\n self.gradient_checkpointing = False\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=0.1\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample2D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n def forward(self, hidden_states, temb=None, num_frames=1):\n output_states = ()\n\n for resnet, temp_conv in zip(self.resnets, self.temp_convs):\n if self.gradient_checkpointing:\n hidden_states = up_down_g_c(resnet, temp_conv, hidden_states, temb, num_frames)\n else:\n hidden_states = resnet(hidden_states, temb)\n\n if num_frames > 1:\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states" }, { "identifier": "UNetMidBlock3DCrossAttn", "path": "models/unet_3d_blocks.py", "snippet": "class UNetMidBlock3DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n dual_cross_attention=False,\n use_linear_projection=True,\n upcast_attention=False,\n ):\n super().__init__()\n\n self.gradient_checkpointing = False\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n 
eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n ]\n temp_convs = [\n TemporalConvLayer(\n in_channels,\n in_channels,\n dropout=0.1\n )\n ]\n attentions = []\n temp_attentions = []\n\n for _ in range(num_layers):\n attentions.append(\n Transformer2DModel(\n in_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n in_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n in_channels,\n in_channels,\n dropout=0.1\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n\n def forward(\n self,\n hidden_states,\n temb=None,\n encoder_hidden_states=None,\n attention_mask=None,\n num_frames=1,\n cross_attention_kwargs=None,\n ):\n if self.gradient_checkpointing:\n hidden_states = up_down_g_c(\n self.resnets[0], \n self.temp_convs[0], \n hidden_states, \n temb, \n num_frames\n )\n else:\n hidden_states = self.resnets[0](hidden_states, temb)\n hidden_states = self.temp_convs[0](hidden_states, num_frames=num_frames)\n \n for attn, temp_attn, resnet, temp_conv in zip(\n self.attentions, self.temp_attentions, self.resnets[1:], self.temp_convs[1:]\n ):\n if self.gradient_checkpointing:\n hidden_states = cross_attn_g_c(\n attn, \n temp_attn, \n resnet, \n temp_conv, \n hidden_states, \n encoder_hidden_states, \n cross_attention_kwargs, \n temb, \n num_frames\n )\n else:\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n ).sample\n \n if num_frames > 1:\n hidden_states = temp_attn(hidden_states, num_frames=num_frames).sample\n\n hidden_states = resnet(hidden_states, temb)\n\n if num_frames > 1:\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n return hidden_states" }, { "identifier": "UpBlock3D", "path": "models/unet_3d_blocks.py", "snippet": "class UpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n prev_output_channel: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_upsample=True,\n ):\n super().__init__()\n resnets = []\n temp_convs = []\n self.gradient_checkpointing = False\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n 
resnets.append(\n ResnetBlock2D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=0.1\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, num_frames=1):\n for resnet, temp_conv in zip(self.resnets, self.temp_convs):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.gradient_checkpointing:\n hidden_states = up_down_g_c(resnet, temp_conv, hidden_states, temb, num_frames)\n else:\n hidden_states = resnet(hidden_states, temb)\n\n if num_frames > 1:\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states" }, { "identifier": "get_down_block", "path": "models/unet_3d_blocks.py", "snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n dual_cross_attention=False,\n use_linear_projection=True,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n):\n if down_block_type == \"DownBlock3D\":\n return DownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"CrossAttnDownBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock3D\")\n return CrossAttnDownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n raise ValueError(f\"{down_block_type} does not exist.\")" }, { "identifier": "get_up_block", "path": "models/unet_3d_blocks.py", "snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n 
cross_attention_dim=None,\n dual_cross_attention=False,\n use_linear_projection=True,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n):\n if up_block_type == \"UpBlock3D\":\n return UpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"CrossAttnUpBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock3D\")\n return CrossAttnUpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n raise ValueError(f\"{up_block_type} does not exist.\")" }, { "identifier": "transformer_g_c", "path": "models/unet_3d_blocks.py", "snippet": "def transformer_g_c(transformer, sample, num_frames):\n sample = g_c(custom_checkpoint(transformer, mode='temp'), \n sample, num_frames, use_reentrant=False\n )['sample']\n\n return sample" } ]
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from diffusers.models.modeling_utils import ModelMixin
from diffusers.models.transformer_temporal import TransformerTemporalModel
from einops import rearrange, repeat
from .unet_3d_blocks import (
    CrossAttnDownBlock3D,
    CrossAttnUpBlock3D,
    DownBlock3D,
    UNetMidBlock3DCrossAttn,
    UpBlock3D,
    get_down_block,
    get_up_block,
    transformer_g_c
)

import torch
import torch.nn as nn
import torch.utils.checkpoint
8980
raise ValueError(f"size {size} has to be smaller or equal to {dim}.") # Recursively walk through all the children. # Any children which exposes the set_attention_slice method # gets the message def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, "set_attention_slice"): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def _set_gradient_checkpointing(self, value=False): self.gradient_checkpointing = value self.mid_block.gradient_checkpointing = value for module in self.down_blocks + self.up_blocks: if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)): module.gradient_checkpointing = value def forward( self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, condition_latent: torch.Tensor, mask: torch.Tensor, class_labels: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None, mid_block_additional_residual: Optional[torch.Tensor] = None, motion = None, return_dict: bool = True, ) -> Union[UNet3DConditionOutput, Tuple]: r""" Args: sample (`torch.FloatTensor`): (batch, num_frames, channel, height, width) noisy inputs tensor timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`models.unet_2d_condition.UNet3DConditionOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py). Returns: [`~models.unet_2d_condition.UNet3DConditionOutput`] or `tuple`: [`~models.unet_2d_condition.UNet3DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ # By default samples have to be AT least a multiple of the overall upsampling factor. # The overall upsampling factor is equal to 2 ** (# num of upsampling layears). # However, the upsampling interpolation output size can be forced to fit any upsampling size # on the fly if necessary. default_overall_up_factor = 2**self.num_upsamplers sample = torch.cat([condition_latent, sample], dim=2) # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` forward_upsample_size = False upsample_size = None if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]): logger.info("Forward upsample size to force interpolation output size.") forward_upsample_size = True # prepare attention_mask if attention_mask is not None: attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) # 1. time timesteps = timestep if not torch.is_tensor(timesteps): # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) is_mps = sample.device.type == "mps" if isinstance(timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML num_frames = sample.shape[2] timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=self.dtype) if self.motion_strength and motion is not None: timestep_cond = self.motion_proj(motion).to(dtype=self.dtype) emb = self.time_embedding(t_emb, timestep_cond) #emb += self.motion_embedding(m_emb) else: emb = self.time_embedding(t_emb, timestep_cond) emb = emb.repeat_interleave(repeats=num_frames, dim=0) encoder_hidden_states = encoder_hidden_states.repeat_interleave(repeats=num_frames, dim=0) # 2. pre-process if self.motion_mask and mask is not None: mask = repeat(mask , 'b 1 1 h w -> (t b) 1 f h w', t=sample.shape[0]//mask.shape[0], f=sample.shape[2]) sample = torch.cat([mask, sample], dim=1) sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:]) sample = self.conv_in2(sample) else: sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:]) sample = self.conv_in(sample) if num_frames > 1: if self.gradient_checkpointing:
# Copyright 2023 Alibaba DAMO-VILAB and The HuggingFace Team. All rights reserved. # Copyright 2023 The ModelScope Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class UNet3DConditionOutput(BaseOutput): """ Args: sample (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`): Hidden states conditioned on `encoder_hidden_states` input. Output of last layer of model. """ sample: torch.FloatTensor class UNet3DConditionModel(ModelMixin, ConfigMixin): r""" UNet3DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep and returns sample shaped output. This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library implements for all the models (such as downloading or saving, etc.) Parameters: sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): Height and width of input/output sample. in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample. out_channels (`int`, *optional*, defaults to 4): The number of channels in the output. down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): The tuple of downsample blocks to use. up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D",)`): The tuple of upsample blocks to use. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution. mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block. act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. If `None`, it will skip the normalization and activation layers in post-processing norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization. cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features. attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads. 
""" _supports_gradient_checkpointing = True @register_to_config def __init__( self, sample_size: Optional[int] = None, in_channels: int = 4, out_channels: int = 4, down_block_types: Tuple[str] = ( "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D", ), up_block_types: Tuple[str] = ("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), block_out_channels: Tuple[int] = (320, 640, 1280, 1280), layers_per_block: int = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, act_fn: str = "silu", norm_num_groups: Optional[int] = 32, norm_eps: float = 1e-5, cross_attention_dim: int = 1024, attention_head_dim: Union[int, Tuple[int]] = 64, motion_mask = False, motion_strength = False, ): super().__init__() self.motion_mask = motion_mask self.motion_strength = motion_strength print(f"motion mask {self.motion_mask}, motion_strength {self.motion_strength}") self.sample_size = sample_size self.gradient_checkpointing = False # Check inputs if len(down_block_types) != len(up_block_types): raise ValueError( f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." ) if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}." ) # input conv_in_kernel = 3 conv_out_kernel = 3 conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding ) self.conv_in2 = nn.Conv2d( 5, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding ) # time time_embed_dim = block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], True, 0) timestep_input_dim = block_out_channels[0] self.time_embedding = TimestepEmbedding( timestep_input_dim, time_embed_dim, act_fn=act_fn, cond_proj_dim=block_out_channels[0], ) self.motion_proj = Timesteps(block_out_channels[0], True, 0) self.motion_embedding = nn.Sequential( nn.Linear(timestep_input_dim, time_embed_dim), nn.SiLU(), nn.Linear(time_embed_dim, time_embed_dim)) nn.init.zeros_(self.motion_embedding[-1].weight) nn.init.zeros_(self.motion_embedding[-1].bias) self.transformer_in = TransformerTemporalModel( num_attention_heads=8, attention_head_dim=attention_head_dim, in_channels=block_out_channels[0], num_layers=1, ) # class embedding self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, 
attn_num_head_channels=attention_head_dim[i], downsample_padding=downsample_padding, dual_cross_attention=False, ) self.down_blocks.append(down_block) # mid self.mid_block = UNetMidBlock3DCrossAttn( in_channels=block_out_channels[-1], temb_channels=time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, cross_attention_dim=cross_attention_dim, attn_num_head_channels=attention_head_dim[-1], resnet_groups=norm_num_groups, dual_cross_attention=False, ) # count how many layers upsample the images self.num_upsamplers = 0 # up reversed_block_out_channels = list(reversed(block_out_channels)) reversed_attention_head_dim = list(reversed(attention_head_dim)) output_channel = reversed_block_out_channels[0] for i, up_block_type in enumerate(up_block_types): is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] # add upsample block for all BUT final layer if not is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False up_block = get_up_block( up_block_type, num_layers=layers_per_block + 1, in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=time_embed_dim, add_upsample=add_upsample, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attn_num_head_channels=reversed_attention_head_dim[i], dual_cross_attention=False, ) self.up_blocks.append(up_block) prev_output_channel = output_channel # out if norm_num_groups is not None: self.conv_norm_out = nn.GroupNorm( num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps ) self.conv_act = nn.SiLU() else: self.conv_norm_out = None self.conv_act = None conv_out_padding = (conv_out_kernel - 1) // 2 self.conv_out = nn.Conv2d( block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding ) def set_attention_slice(self, slice_size): r""" Enable sliced attention computation. When this option is enabled, the attention module will split the input tensor in slices, to compute attention in several steps. This is useful to save some memory in exchange for a small speed decrease. Args: slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If `"max"`, maxium amount of memory will be saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`. 
""" sliceable_head_dims = [] def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module): if hasattr(module, "set_attention_slice"): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_slicable_dims(child) # retrieve number of attention layers for module in self.children(): fn_recursive_retrieve_slicable_dims(module) num_slicable_layers = len(sliceable_head_dims) if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == "max": # make smallest slice possible slice_size = num_slicable_layers * [1] slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError( f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." ) for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f"size {size} has to be smaller or equal to {dim}.") # Recursively walk through all the children. # Any children which exposes the set_attention_slice method # gets the message def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, "set_attention_slice"): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def _set_gradient_checkpointing(self, value=False): self.gradient_checkpointing = value self.mid_block.gradient_checkpointing = value for module in self.down_blocks + self.up_blocks: if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)): module.gradient_checkpointing = value def forward( self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, condition_latent: torch.Tensor, mask: torch.Tensor, class_labels: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None, mid_block_additional_residual: Optional[torch.Tensor] = None, motion = None, return_dict: bool = True, ) -> Union[UNet3DConditionOutput, Tuple]: r""" Args: sample (`torch.FloatTensor`): (batch, num_frames, channel, height, width) noisy inputs tensor timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`models.unet_2d_condition.UNet3DConditionOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py). 
Returns: [`~models.unet_2d_condition.UNet3DConditionOutput`] or `tuple`: [`~models.unet_2d_condition.UNet3DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ # By default samples have to be AT least a multiple of the overall upsampling factor. # The overall upsampling factor is equal to 2 ** (# num of upsampling layears). # However, the upsampling interpolation output size can be forced to fit any upsampling size # on the fly if necessary. default_overall_up_factor = 2**self.num_upsamplers sample = torch.cat([condition_latent, sample], dim=2) # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` forward_upsample_size = False upsample_size = None if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]): logger.info("Forward upsample size to force interpolation output size.") forward_upsample_size = True # prepare attention_mask if attention_mask is not None: attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) # 1. time timesteps = timestep if not torch.is_tensor(timesteps): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) is_mps = sample.device.type == "mps" if isinstance(timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML num_frames = sample.shape[2] timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=self.dtype) if self.motion_strength and motion is not None: timestep_cond = self.motion_proj(motion).to(dtype=self.dtype) emb = self.time_embedding(t_emb, timestep_cond) #emb += self.motion_embedding(m_emb) else: emb = self.time_embedding(t_emb, timestep_cond) emb = emb.repeat_interleave(repeats=num_frames, dim=0) encoder_hidden_states = encoder_hidden_states.repeat_interleave(repeats=num_frames, dim=0) # 2. pre-process if self.motion_mask and mask is not None: mask = repeat(mask , 'b 1 1 h w -> (t b) 1 f h w', t=sample.shape[0]//mask.shape[0], f=sample.shape[2]) sample = torch.cat([mask, sample], dim=1) sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:]) sample = self.conv_in2(sample) else: sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:]) sample = self.conv_in(sample) if num_frames > 1: if self.gradient_checkpointing:
sample = transformer_g_c(self.transformer_in, sample, num_frames)
7
2023-12-07 08:26:29+00:00
12k
octo-models/octo
scripts/finetune.py
[ { "identifier": "make_single_dataset", "path": "octo/data/dataset.py", "snippet": "def make_single_dataset(\n dataset_kwargs: dict,\n *,\n train: bool,\n traj_transform_kwargs: dict = {},\n frame_transform_kwargs: dict = {},\n) -> dl.DLataset:\n \"\"\"Creates a single dataset from kwargs. Returns a dataset of trajectories.\n\n Args:\n dataset_kwargs: kwargs passed to `make_dataset_from_rlds` that are dataset-specific.\n train: whether this is a training or validation dataset.\n traj_transform_kwargs: kwargs passed to 'apply_trajectory_transforms'.\n frame_transform_kwargs: kwargs passed to 'get_frame_transforms'.\n \"\"\"\n dataset, dataset_statistics = make_dataset_from_rlds(\n **dataset_kwargs,\n train=train,\n )\n dataset = apply_trajectory_transforms(dataset, **traj_transform_kwargs, train=train)\n dataset = apply_frame_transforms(dataset, **frame_transform_kwargs, train=train)\n\n # this seems to reduce memory usage without affecting speed\n dataset = dataset.with_ram_budget(1)\n\n # save for later\n dataset.dataset_statistics = dataset_statistics\n return dataset" }, { "identifier": "OctoModel", "path": "octo/model/octo_model.py", "snippet": "class OctoModel:\n \"\"\"Recommended way of interacting with Octo models.\n\n Usage for inference:\n\n >>> model = OctoModel.load_pretrained(checkpoint_dir)\n >>> tasks = model.create_tasks(texts=[\"go to the red room\"])\n >>> # or tasks = model.create_tasks(goals={\"image_primary\": goal_images})\n >>> actions = model.sample_actions(observations, tasks, rng=jax.random.PRNGKey(0))\n >>> # Note: these are normalized actions (processed to mean 0 and std 1). To get the raw actions,\n # un-normalize them using model.dataset_statistics\n\n Usage for finetuning:\n\n >>> model = OctoModel.load_pretrained(checkpoint_dir)\n >>> train_state = octo.utils.train_utils.TrainState.create(\n rng=jax.random.PRNGKey(0),\n model=model,\n tx=optax.adamw(...)\n )\n >>> # access params through train_state.model.params\n >>> train_state, metrics = your_update_function(train_state, batch)\n >>> # when it's time to save (note that this only saves the model parameters,\n >>> # not the full optimizer state)\n >>> train_state.model.save_pretrained(step, save_dir)\n\n Usage for pretraining:\n\n >>> model = OctoModel.from_config(\n config,\n example_batch,\n text_processor\n ) # initializes params\n >>> # Continue as in finetuning example\n\n See full usage examples in train.py and finetune.py.\n\n \"\"\"\n\n module: OctoModule = struct.field(pytree_node=False)\n text_processor: TextProcessor = struct.field(pytree_node=False)\n config: Config = struct.field(pytree_node=False)\n params: Params\n example_batch: Data\n dataset_statistics: Optional[Data]\n\n def create_tasks(\n self, goals: Optional[Data] = None, texts: Optional[Sequence[str]] = None\n ):\n \"\"\"Creates tasks dict from goals and texts.\n\n Args:\n goals: if not None, dict of arrays with shape (batch_size, *)\n texts: if not None, list of texts of length batch_size\n\n Omit images to run the language-conditioned model, and omit texts to run the\n goal-conditioned model.\n \"\"\"\n assert goals is not None or texts is not None\n tasks = {\"pad_mask_dict\": {}}\n if goals is not None:\n tasks.update(goals)\n tasks[\"pad_mask_dict\"].update(\n {k: np.ones(v.shape[:1], dtype=bool) for k, v in goals.items()}\n )\n else:\n batch_size = len(texts)\n tasks.update(\n {\n k: np.zeros((batch_size, *v.shape[1:]), dtype=v.dtype)\n for k, v in self.example_batch[\"task\"].items()\n if k not in (\"pad_mask_dict\", 
\"language_instruction\")\n }\n )\n tasks[\"pad_mask_dict\"].update(\n {\n k: np.zeros(batch_size, dtype=bool)\n for k in tasks.keys()\n if k != \"pad_mask_dict\"\n }\n )\n\n if texts is not None:\n assert self.text_processor is not None\n tasks[\"language_instruction\"] = texts\n tasks[\"pad_mask_dict\"][\"language_instruction\"] = np.ones(\n len(texts), dtype=bool\n )\n else:\n batch_size = jax.tree_leaves(goals)[0].shape[0]\n tasks[\"language_instruction\"] = [\"\"] * batch_size\n tasks[\"pad_mask_dict\"][\"language_instruction\"] = np.zeros(\n batch_size, dtype=bool\n )\n\n if self.text_processor is not None:\n tasks[\"language_instruction\"] = self.text_processor.encode(\n tasks[\"language_instruction\"]\n )\n else:\n del tasks[\"language_instruction\"]\n\n _verify_shapes(tasks, \"tasks\", self.example_batch[\"task\"], starting_dim=1)\n return tasks\n\n @partial(jax.jit, static_argnames=(\"train\",))\n def run_transformer(\n self, observations: Data, tasks: Data, pad_mask: ArrayLike, train: bool = False\n ):\n \"\"\"Runs the transformer, but does shape checking on the inputs.\n\n Args:\n observations: dictionary of arrays of shape (batch_size, window_size, *shape).\n Shape must be consistent with self.example_batch[\"observation\"]\n tasks: dict of tasks of shape (batch_size, *shape)\n Shape must be consistent with self.example_batch[\"task\"]\n pad_mask: (batch_size, window_size) Boolean mask that is False when the timestep corresponds to padding\n train: whether to run in train mode\n \"\"\"\n _verify_shapes(\n observations,\n \"observations\",\n self.example_batch[\"observation\"],\n starting_dim=2,\n )\n _verify_shapes(tasks, \"tasks\", self.example_batch[\"task\"], starting_dim=1)\n\n return self.module.apply(\n {\"params\": self.params},\n observations,\n tasks,\n pad_mask,\n train=train,\n method=\"octo_transformer\",\n )\n\n @partial(jax.jit, static_argnames=(\"train\", \"sample_shape\", \"argmax\"))\n def sample_actions(\n self,\n observations: Data,\n tasks: Data,\n pad_mask: Optional[ArrayLike] = None,\n train: bool = False,\n argmax: bool = False,\n sample_shape: Tuple[int, ...] = (),\n rng: Optional[PRNGKey] = None,\n temperature: float = 1.0,\n ):\n \"\"\"Samples actions from the model. See `action_heads.py` for more info.\n\n Args:\n observations: dictionary of arrays of shape (batch_size, window_size, *)\n tasks: dict of tasks of shape (batch_size, *)\n pad_mask: (batch_size, window_size) Boolean mask that is False when the timestep corresponds to padding\n train: whether to run in train mode\n ...see `action_heads.py` for the rest of the kwargs.\n Returns:\n actions: (*sample_shape, batch_size, pred_horizon, action_dim)\n \"\"\"\n if pad_mask is None:\n pad_mask = observations[\"pad_mask\"]\n\n transformer_outputs = self.run_transformer(\n observations, tasks, pad_mask, train=train\n )\n action_head: ActionHead = self.module.bind({\"params\": self.params}).heads[\n \"action\"\n ]\n return action_head.predict_action(\n transformer_outputs,\n train=train,\n argmax=argmax,\n sample_shape=sample_shape,\n rng=rng,\n temperature=temperature,\n )\n\n @classmethod\n def load_pretrained(\n cls,\n checkpoint_path: str,\n step: Optional[int] = None,\n ) -> \"OctoModel\":\n \"\"\"Loads a model from a checkpoint that was saved via `save_pretrained`.\n\n Args:\n checkpoint_path (str): A path to either a directory of checkpoints or a single checkpoint.\n step (int, optional): If multiple checkpoints are present, which one to load. 
Defaults to the latest.\n \"\"\"\n if checkpoint_path.startswith(\"hf://\"):\n if step:\n raise ValueError(\n \"You can't set config['pretrained_step'] when loading from HuggingFace.\"\n )\n checkpoint_path = _download_from_huggingface(\n checkpoint_path.removeprefix(\"hf://\")\n )\n\n # load config\n with tf.io.gfile.GFile(\n tf.io.gfile.join(checkpoint_path, \"config.json\"), \"r\"\n ) as f:\n config = json.load(f)\n\n # load example batch\n with tf.io.gfile.GFile(\n tf.io.gfile.join(checkpoint_path, \"example_batch.msgpack\"), \"rb\"\n ) as f:\n example_batch = flax.serialization.msgpack_restore(f.read())\n # shim for migrating from \"tasks\" to \"task\"\n if \"tasks\" in example_batch:\n example_batch[\"task\"] = example_batch.pop(\"tasks\")\n\n logging.debug(\n \"Model was trained with observations: %s\",\n flax.core.pretty_repr(\n jax.tree_map(jnp.shape, example_batch[\"observation\"])\n ),\n )\n logging.debug(\n \"Model was trained with tasks: %s\",\n flax.core.pretty_repr(jax.tree_map(jnp.shape, example_batch[\"task\"])),\n )\n\n # load dataset statistics\n with tf.io.gfile.GFile(\n tf.io.gfile.join(checkpoint_path, \"dataset_statistics.json\"), \"r\"\n ) as f:\n dataset_statistics = json.load(f)\n dataset_statistics = jax.tree_map(\n np.array, dataset_statistics, is_leaf=lambda x: not isinstance(x, dict)\n )\n\n # create model def (an OctoModule)\n module = OctoModule.create(**config[\"model\"])\n # infer params shape without actually doing any computation\n params_shape = jax.eval_shape(\n partial(module.init, train=False),\n jax.random.PRNGKey(0),\n example_batch[\"observation\"],\n example_batch[\"task\"],\n example_batch[\"observation\"][\"pad_mask\"],\n )[\"params\"]\n # restore params, checking to make sure the shape matches\n checkpointer = orbax.checkpoint.CheckpointManager(\n checkpoint_path, orbax.checkpoint.PyTreeCheckpointer()\n )\n step = step if step is not None else checkpointer.latest_step()\n params = checkpointer.restore(step, params_shape)\n\n if config[\"text_processor\"] is not None:\n text_processor = ModuleSpec.instantiate(config[\"text_processor\"])()\n else:\n text_processor = None\n\n return cls(\n module=module,\n params=params,\n text_processor=text_processor,\n example_batch=example_batch,\n config=config,\n dataset_statistics=dataset_statistics,\n )\n\n def save_pretrained(\n self,\n step: int,\n checkpoint_path: Optional[str] = None,\n checkpoint_manager: Optional[orbax.checkpoint.CheckpointManager] = None,\n ):\n \"\"\"Saves a model, as well as corresponding metadata needed for `load_pretrained`. Takes either a\n pre-existing checkpoint manager (which already knows where to save the checkpoint) or a path to a\n directory to save the checkpoint to.\n\n Args:\n step (int): Step number.\n checkpoint_path (str, optional): Path to save the checkpoint.\n checkpoint_manager (optional): Checkpoint manager to save the checkpoint.\n params (optional): Params to save. 
If None, uses self.params.\n \"\"\"\n if (checkpoint_path is None) == (checkpoint_manager is None):\n raise ValueError(\n \"Must provide exactly one of checkpoint_path or checkpoint_manager.\"\n )\n if checkpoint_manager is None:\n checkpoint_manager = orbax.checkpoint.CheckpointManager(\n checkpoint_path, orbax.checkpoint.PyTreeCheckpointer()\n )\n if checkpoint_path is None:\n checkpoint_path = str(checkpoint_manager._directory)\n\n # save params\n checkpoint_manager.save(\n step,\n self.params,\n {\"save_args\": orbax_utils.save_args_from_target(self.params)},\n )\n\n if jax.process_index() == 0:\n # save config\n config_path = tf.io.gfile.join(checkpoint_path, \"config.json\")\n if not tf.io.gfile.exists(config_path):\n with tf.io.gfile.GFile(config_path, \"w\") as f:\n json.dump(self.config, f)\n\n # save example batch\n example_batch_path = tf.io.gfile.join(\n checkpoint_path, \"example_batch.msgpack\"\n )\n if not tf.io.gfile.exists(example_batch_path):\n with tf.io.gfile.GFile(example_batch_path, \"wb\") as f:\n f.write(flax.serialization.msgpack_serialize(self.example_batch))\n\n # save dataset statistics\n dataset_statistics_path = tf.io.gfile.join(\n checkpoint_path, \"dataset_statistics.json\"\n )\n if not tf.io.gfile.exists(dataset_statistics_path):\n with tf.io.gfile.GFile(dataset_statistics_path, \"w\") as f:\n json.dump(\n jax.tree_map(lambda x: x.tolist(), self.dataset_statistics),\n f,\n )\n\n @classmethod\n def from_config(\n cls,\n config: Config,\n example_batch: Data,\n text_processor: Optional[Any] = None,\n verbose: bool = False,\n rng: Optional[PRNGKey] = None,\n dataset_statistics: Optional[Data] = None,\n ):\n \"\"\"Initializes a model with a fresh set of weights from a given config + example_batch.\n\n Args:\n config (Dict[str, Any]): Config dict. 
The only required key is \"model\", but other configuration\n may be saved for posterity.\n example_batch (Dict[str, Any]): Example batch.\n text_processor (Any, optional): Preprocessor for text inputs.\n verbose (bool, optional): Whether to print out a summary of the model.\n rng (Optional[PRNGKey], optional): RNG key for initializing the model.\n dataset_statistics (Optional[Dict[str, Any]], optional): Dataset statistics.\n \"\"\"\n module = OctoModule.create(**config[\"model\"])\n rng = rng if rng is not None else jax.random.PRNGKey(0)\n example_batch = multihost_utils.process_allgather(example_batch)\n example_batch = jax.tree_map(lambda x: x[:1], example_batch)\n\n init_args = (\n example_batch[\"observation\"],\n example_batch[\"task\"],\n example_batch[\"observation\"][\"pad_mask\"],\n )\n\n if verbose:\n print(\n module.tabulate(rng, *init_args, train=False, verbose=True, depth=2)\n ) # Prints out the parameter count of our model, and tokenizer details\n\n @jax.jit\n def _init(rng):\n return module.init(rng, *init_args, train=False)\n\n params = _init(rng)[\"params\"]\n\n return cls(\n module=module,\n params=params,\n text_processor=text_processor,\n example_batch=example_batch,\n config=config,\n dataset_statistics=dataset_statistics,\n )\n\n def get_pretty_spec(self):\n \"\"\"Brief summary of the model's expected inputs and outputs.\"\"\"\n # TODO: generalize this to print out proprio when it is being tokenized\n window_size = self.example_batch[\"observation\"][\"pad_mask\"].shape[1]\n\n observation_space = {\n k: (\"batch\", \"history_window\", *v.shape[2:])\n for k, v in self.example_batch[\"observation\"].items()\n if k.startswith(\"image\")\n }\n task_space = {\n k: (\"batch\", *v.shape[1:])\n for k, v in self.example_batch[\"task\"].items()\n if k.startswith(\"image\")\n }\n if self.text_processor is not None:\n task_space[\"language_instruction\"] = jax.tree_map(\n lambda arr: (\"batch\", *arr.shape[1:]),\n self.example_batch[\"task\"][\"language_instruction\"],\n )\n\n try:\n action_head = self.module.heads[\"action\"]\n action_head_repr = str(action_head.__class__)\n action_dim, pred_horizon = action_head.action_dim, action_head.pred_horizon\n except:\n action_head_repr, action_dim, pred_horizon = \"\", None, None\n\n return SPEC_TEMPLATE.format(\n window_size=window_size,\n observation_space=flax.core.pretty_repr(observation_space),\n task_space=flax.core.pretty_repr(task_space),\n action_head_repr=action_head_repr,\n action_dim=action_dim,\n pred_horizon=pred_horizon,\n )" }, { "identifier": "initialize_compilation_cache", "path": "octo/utils/jax_utils.py", "snippet": "def initialize_compilation_cache(\n cache_dir=os.path.expanduser(\"~/.jax_compilation_cache\"),\n):\n \"\"\"Initializes the Jax persistent compilation cache.\"\"\"\n compilation_cache.initialize_cache(cache_dir)\n for logger in [logging.getLogger(name) for name in logging.root.manager.loggerDict]:\n logger.addFilter(\n lambda record: \"Not writing persistent cache entry for\"\n not in record.getMessage()\n )" }, { "identifier": "ModuleSpec", "path": "octo/utils/spec.py", "snippet": "class ModuleSpec(TypedDict):\n \"\"\"A JSON-serializable representation of a function or class with some default args and kwargs to pass to\n it. 
Useful for specifying a particular class or function in a config file, while keeping it serializable\n and overridable from the command line using ml_collections.\n\n Usage:\n\n # Preferred way to create a spec:\n >>> from octo.model.components.transformer import Transformer\n >>> spec = ModuleSpec.create(Transformer, num_layers=3)\n # Same as above using the fully qualified import string:\n >>> spec = ModuleSpec.create(\"octo.model.components.transformer:Transformer\", num_layers=3)\n\n # Usage:\n >>> ModuleSpec.instantiate(spec) == partial(Transformer, num_layers=3)\n # can pass additional kwargs at instantiation time\n >>> transformer = ModuleSpec.instantiate(spec, num_heads=8)\n\n Note: ModuleSpec is just an alias for a dictionary (that is strongly typed), not a real class. So from\n your code's perspective, it is just a dictionary.\n\n module (str): The module the callable is located in\n name (str): The name of the callable in the module\n args (tuple): The args to pass to the callable\n kwargs (dict): The kwargs to pass to the callable\n \"\"\"\n\n module: str\n name: str\n args: Tuple[Any, ...]\n kwargs: Dict[str, Any]\n\n @staticmethod\n def create(callable_or_full_name: Union[str, callable], *args, **kwargs) -> \"ModuleSpec\": # type: ignore\n \"\"\"Create a module spec from a callable or import string.\n\n Args:\n callable_or_full_name (str or object): Either the object itself or a fully qualified import string\n (e.g. \"octo.model.components.transformer:Transformer\")\n args (tuple, optional): Passed into callable upon instantiation.\n kwargs (dict, optional): Passed into callable upon instantiation.\n \"\"\"\n if isinstance(callable_or_full_name, str):\n assert callable_or_full_name.count(\":\") == 1, (\n \"If passing in a string, it must be a fully qualified import string \"\n \"(e.g. 'octo.model.components.transformer:Transformer')\"\n )\n module, name = callable_or_full_name.split(\":\")\n else:\n module, name = _infer_full_name(callable_or_full_name)\n\n return ModuleSpec(module=module, name=name, args=args, kwargs=kwargs)\n\n @staticmethod\n def instantiate(spec: \"ModuleSpec\"): # type: ignore\n if set(spec.keys()) != {\"module\", \"name\", \"args\", \"kwargs\"}:\n raise ValueError(\n f\"Expected ModuleSpec, but got {spec}. 
\"\n \"ModuleSpec must have keys 'module', 'name', 'args', and 'kwargs'.\"\n )\n cls = _import_from_string(spec[\"module\"], spec[\"name\"])\n return partial(cls, *spec[\"args\"], **spec[\"kwargs\"])" }, { "identifier": "RolloutVisualizationCallback", "path": "octo/utils/train_callbacks.py", "snippet": "class RolloutVisualizationCallback(Callback):\n visualizer_kwargs_list: Sequence[Mapping[str, Any]]\n text_processor: TextProcessor\n trajs_for_rollouts: int\n model_pred_horizon: int\n history_length: int\n modes_to_evaluate: str = (\"text_conditioned\", \"image_conditioned\")\n\n def __post_init__(self):\n self.zero_text = jax.tree_map(lambda x: x[0], self.text_processor.encode(\"\"))\n\n self.rollout_visualizers = [\n RolloutVisualizer(\n text_processor=self.text_processor,\n history_length=self.history_length,\n action_chunk=self.model_pred_horizon\n if \"pred_horizon\" not in kwargs\n else kwargs[\"pred_horizon\"],\n **kwargs,\n )\n for kwargs in self.visualizer_kwargs_list\n ]\n\n def __call__(self, train_state: TrainState, step: int):\n wandb_metrics = {}\n modal_policy_fns = {\n mode: partial(\n get_policy_sampled_actions,\n train_state,\n zero_text=self.zero_text,\n samples_per_state=1,\n policy_mode=mode,\n )\n for mode in self.modes_to_evaluate\n }\n for rollout_visualizer in self.rollout_visualizers:\n for mode, policy_fn in modal_policy_fns.items():\n logging.info(f\"Running rollouts for {rollout_visualizer.env_name}\")\n rollout_infos = rollout_visualizer.run_rollouts(\n policy_fn, n_rollouts=self.trajs_for_rollouts\n )\n wandb_metrics[\n f\"rollouts_{rollout_visualizer.env_name}_chunk{rollout_visualizer.action_chunk}/{mode}\"\n ] = rollout_infos\n\n return wandb_metrics" }, { "identifier": "SaveCallback", "path": "octo/utils/train_callbacks.py", "snippet": "class SaveCallback(Callback):\n \"\"\"Callback that saves checkpoints to `save_dir`. 
If `save_dir` is None, does nothing.\"\"\"\n\n save_dir: Optional[str]\n\n def __post_init__(self):\n if self.save_dir is not None:\n if not self.save_dir.startswith(\"gs://\"):\n self.save_dir = os.path.abspath(self.save_dir)\n if jax.process_index() == 0:\n tf.io.gfile.makedirs(self.save_dir)\n logging.info(f\"Created {self.save_dir}\")\n # make checkpointers\n # only keep latest full TrainState\n self.state_checkpointer = orbax.checkpoint.CheckpointManager(\n tf.io.gfile.join(self.save_dir, \"state\"),\n orbax.checkpoint.PyTreeCheckpointer(),\n options=orbax.checkpoint.CheckpointManagerOptions(\n max_to_keep=1,\n ),\n )\n # keep every params checkpoint\n self.params_checkpointer = orbax.checkpoint.CheckpointManager(\n self.save_dir,\n orbax.checkpoint.PyTreeCheckpointer(),\n )\n\n def __call__(self, train_state: TrainState, step: int):\n if self.save_dir is not None:\n train_state.model.save_pretrained(\n step, checkpoint_manager=self.params_checkpointer\n )\n self.state_checkpointer.save(\n step,\n train_state,\n {\"save_args\": orbax_utils.save_args_from_target(train_state)},\n )" }, { "identifier": "ValidationCallback", "path": "octo/utils/train_callbacks.py", "snippet": "class ValidationCallback(Callback):\n loss_fn: Callable\n process_batch_fn: Callable[[Data], Data]\n text_processor: Optional[TextProcessor]\n val_dataset_kwargs_list: Sequence[Mapping[str, Any]]\n dataset_kwargs: Mapping[str, Any]\n val_shuffle_buffer_size: int\n num_val_batches: int\n modes_to_evaluate: Sequence[str] = (\"text_conditioned\", \"image_conditioned\")\n train: bool = False\n\n def __post_init__(self):\n if self.text_processor is not None:\n self.zero_text = jax.tree_map(\n lambda x: x[0], self.text_processor.encode(\"\")\n )\n self.val_iterators = {}\n for single_dataset_kwargs in self.val_dataset_kwargs_list:\n val_dataset = create_validation_dataset(\n single_dataset_kwargs,\n self.dataset_kwargs[\"traj_transform_kwargs\"],\n self.dataset_kwargs[\"frame_transform_kwargs\"],\n train=self.train,\n )\n val_iterator = (\n val_dataset.unbatch()\n .shuffle(self.val_shuffle_buffer_size)\n .repeat()\n .batch(self.dataset_kwargs[\"batch_size\"])\n .iterator(prefetch=0)\n )\n val_iterator = map(self.process_batch_fn, val_iterator)\n self.val_iterators[single_dataset_kwargs[\"name\"]] = val_iterator\n\n @partial(\n jax.jit,\n out_shardings=jax.sharding.PositionalSharding(jax.devices()).replicate(),\n )\n def eval_step(state: TrainState, batch: Data):\n loss_fn_partial = partial(\n self.loss_fn,\n params=state.model.params,\n rng=state.rng,\n train=False,\n )\n all_tasks = {}\n\n if \"base\" in self.modes_to_evaluate:\n all_tasks[\"base\"] = batch[\"task\"]\n if \"image_conditioned\" in self.modes_to_evaluate:\n all_tasks[\"image_conditioned\"] = remove_text(\n batch[\"task\"], self.zero_text\n )\n if \"text_conditioned\" in self.modes_to_evaluate:\n all_tasks[\"text_conditioned\"] = remove_images(batch[\"task\"])\n\n if \"unconditioned\" in self.modes_to_evaluate:\n all_tasks[\"unconditioned\"] = remove_text(\n remove_images(batch[\"task\"]), self.zero_text\n )\n return {\n k: loss_fn_partial(batch=flax.core.copy(batch, {\"task\": tasks}))[1]\n for k, tasks in all_tasks.items()\n }\n\n self.eval_step = eval_step\n\n def __call__(self, train_state: TrainState, step: int):\n wandb_metrics = {}\n for name, val_data_iter in self.val_iterators.items():\n metrics = []\n for _, batch in tqdm.tqdm(\n zip(range(self.num_val_batches), val_data_iter),\n total=self.num_val_batches,\n desc=name,\n ):\n 
metrics.append(self.eval_step(train_state, batch))\n metrics = jax.tree_map(lambda *xs: np.mean(xs), *metrics)\n wandb_metrics[f\"validation_{name}\"] = metrics\n return wandb_metrics" }, { "identifier": "VisualizationCallback", "path": "octo/utils/train_callbacks.py", "snippet": "class VisualizationCallback(Callback):\n text_processor: TextProcessor\n val_dataset_kwargs_list: Sequence[Mapping[str, Any]]\n dataset_kwargs: Mapping[str, Any]\n eval_batch_size: int\n trajs_for_metrics: int\n trajs_for_viz: int\n samples_per_state: int\n modes_to_evaluate: str = (\"text_conditioned\", \"image_conditioned\")\n train: bool = False\n\n def __post_init__(self):\n self.zero_text = jax.tree_map(lambda x: x[0], self.text_processor.encode(\"\"))\n\n self.visualizers = {}\n for single_dataset_kwargs in self.val_dataset_kwargs_list:\n val_dataset = create_validation_dataset(\n single_dataset_kwargs,\n self.dataset_kwargs[\"traj_transform_kwargs\"],\n self.dataset_kwargs[\"frame_transform_kwargs\"],\n train=self.train,\n )\n self.visualizers[single_dataset_kwargs[\"name\"]] = Visualizer(\n val_dataset, text_processor=self.text_processor, freeze_trajs=False\n )\n\n def __call__(self, train_state: TrainState, step: int):\n wandb_metrics = {}\n modal_policy_fns = {\n mode: batched_apply(\n partial(\n get_policy_sampled_actions,\n train_state,\n zero_text=self.zero_text,\n samples_per_state=self.samples_per_state,\n policy_mode=mode,\n ),\n self.eval_batch_size,\n )\n for mode in self.modes_to_evaluate\n }\n\n for name, visualizer in self.visualizers.items():\n for mode, policy_fn in modal_policy_fns.items():\n if self.trajs_for_metrics > 0:\n raw_infos = visualizer.raw_evaluations(\n policy_fn, max_trajs=self.trajs_for_metrics\n )\n metrics = visualizer.metrics_for_wandb(raw_infos)\n wandb_metrics[f\"offline_metrics_{name}/{mode}\"] = metrics\n if self.trajs_for_viz > 0:\n images = visualizer.visualize_for_wandb(\n policy_fn, max_trajs=self.trajs_for_viz\n )\n wandb_metrics[f\"visualizations_{name}/{mode}\"] = images\n return wandb_metrics" }, { "identifier": "check_config_diff", "path": "octo/utils/train_utils.py", "snippet": "def check_config_diff(new_conf: Config, old_conf: Config, silent: bool = False):\n \"\"\"Checks for differences between new config and old config dicts.\"\"\"\n new_conf_flat = flax.traverse_util.flatten_dict(\n new_conf.to_dict() if isinstance(new_conf, ConfigDict) else new_conf\n )\n old_conf_flat = flax.traverse_util.flatten_dict(\n old_conf.to_dict() if isinstance(old_conf, ConfigDict) else old_conf\n )\n\n # check for missing / new keys\n if set(new_conf_flat.keys()) != set(old_conf_flat.keys()) and not silent:\n logging.info(\n \"New config contains extra items: %s\",\n set(new_conf_flat.keys()) - set(old_conf_flat.keys()),\n )\n logging.info(\n \"New config doesn't contain items: %s\",\n set(old_conf_flat.keys()) - set(new_conf_flat.keys()),\n )\n\n # print differing key values\n mismatched_keys = {\n k: (new_conf_flat[k], old_conf_flat[k])\n for k in new_conf_flat\n if k in old_conf_flat and new_conf_flat[k] != old_conf_flat[k]\n }\n if mismatched_keys and not silent:\n logging.info(\n \"New config contains keys with new values: %s\",\n flax.core.pretty_repr(mismatched_keys),\n )\n return mismatched_keys or (set(new_conf_flat.keys()) != set(old_conf_flat.keys()))" }, { "identifier": "create_optimizer", "path": "octo/utils/train_utils.py", "snippet": "def create_optimizer(\n params_or_params_shape: Params, **kwargs: dict\n) -> optax.GradientTransformation:\n \"\"\"Creates 
optimizer for Octo.\n\n kwargs are the kwargs for optax.adamw; if the \"learning_rate\" key is a dict, it is interpreted\n as the kwargs for create_lr_schedule (see above), otherwise it is interpreted as a constant\n learning rate.\n\n If clip_gradient is specified, then gradient clipping is applied. If frozen_keys is specified,\n then those parameters are frozen (i.e. not updated) during training.\n\n Returns:\n tx: an Optax optimizer\n lr_callable: Function that takes the current step and returns the learning rate\n \"\"\"\n if isinstance(kwargs[\"learning_rate\"], dict):\n lr_callable = create_lr_schedule(**kwargs[\"learning_rate\"])\n else:\n lr_callable = lambda _: kwargs[\"learning_rate\"]\n kwargs[\"learning_rate\"] = lr_callable\n\n # Following ViT, timm, MAE: this mask skips weight decay on biases and LayerNorm parameters\n wd_mask = jax.tree_util.tree_map_with_path(\n lambda path, x: \"kernel\" in jax.tree_util.keystr(path), params_or_params_shape\n )\n\n clip_gradient = kwargs.pop(\"clip_gradient\", None)\n frozen_keys = kwargs.pop(\"frozen_keys\", None)\n grad_accumulation_steps = kwargs.pop(\"grad_accumulation_steps\", None)\n\n tx = optax.adamw(mu_dtype=jnp.bfloat16, **kwargs, mask=wd_mask)\n if grad_accumulation_steps:\n tx = optax.MultiSteps(tx, grad_accumulation_steps)\n if clip_gradient is not None:\n tx = optax.chain(\n optax.clip_by_global_norm(clip_gradient),\n tx,\n )\n\n if frozen_keys:\n tx, param_partitions = freeze_weights(\n tx, params_or_params_shape, frozen_keys, return_partitions=True\n )\n zero_frozen_params = lambda params: jax.tree_map(\n lambda x, y: x if y == \"trainable\" else jnp.zeros(()),\n params,\n param_partitions,\n )\n param_norm_callable = lambda params: optax.global_norm(\n zero_frozen_params(params)\n )\n else:\n param_norm_callable = optax.global_norm\n\n return tx, lr_callable, param_norm_callable" }, { "identifier": "format_name_with_config", "path": "octo/utils/train_utils.py", "snippet": "def format_name_with_config(name, config):\n \"\"\"Formats a name string with a config dict.\n\n Formatting keys may be specified as {key} or {full_path_to_key_with_underscores}.\n\n Example:\n name = \"model_{model_type}_{model_size}\"\n config = {\"model_type\": \"transformer\", \"model_size\": \"small\"}\n format_name_with_config(name, config) -> \"model_transformer_small\"\n \"\"\"\n config_flat = flax.traverse_util.flatten_dict(config, sep=\"_\")\n config_final = {k.split(\"_\")[-1]: v for k, v in config_flat.items()}\n format_dict = {**config_final, **config_flat}\n return name.format(**format_dict)" }, { "identifier": "merge_params", "path": "octo/utils/train_utils.py", "snippet": "def merge_params(target_params: Params, pretrained_params: Params) -> Params:\n \"\"\"Copies pre-trained params into target_params for every param that has corresponding key + shape.\"\"\"\n flat_target_params = flax.traverse_util.flatten_dict(target_params)\n flat_pretrained_params = flax.traverse_util.flatten_dict(pretrained_params)\n keys_to_update = [\n k\n for k in flat_target_params\n if k in flat_pretrained_params\n and flat_target_params[k].shape == flat_pretrained_params[k].shape\n ]\n missing_keys = [k for k in flat_target_params if k not in flat_pretrained_params]\n shape_mismatch_keys = [\n k\n for k in flat_target_params\n if k in flat_pretrained_params\n and flat_target_params[k].shape != flat_pretrained_params[k].shape\n ]\n\n for key in keys_to_update:\n logging.debug(f\"Param copied from pre-trained: {'.'.join(key)}\")\n if missing_keys or 
shape_mismatch_keys:\n logging.info(\"########## Parameters skipped during model loading: ##########\")\n for key in missing_keys:\n logging.info(\n f\"Param missing in pre-trained model, skipping: {'.'.join(key)}\"\n )\n for key in shape_mismatch_keys:\n logging.info(\n f\"Param with differing shape in pre-trained model, skipping: {'.'.join(key)}\"\n )\n\n flat_target_params = flax.core.copy(\n flat_target_params, {k: flat_pretrained_params[k] for k in keys_to_update}\n )\n target_params = flax.traverse_util.unflatten_dict(flat_target_params)\n return target_params" }, { "identifier": "process_text", "path": "octo/utils/train_utils.py", "snippet": "def process_text(batch: Data, text_processor: Optional[TextProcessor]) -> Data:\n \"\"\"Encodes the language instruction inside the tasks for a batch.\n\n If the text processor is None, removes language entirely from the tasks.\n Expects batch to be a nested dictionary, where\n batch[\"task\"][\"language_instruction\"] is a sequence of byte strings\n \"\"\"\n if text_processor is None:\n batch[\"task\"].pop(\"language_instruction\")\n else:\n batch[\"task\"][\"language_instruction\"] = text_processor.encode(\n [s.decode(\"utf-8\") for s in batch[\"task\"][\"language_instruction\"]]\n )\n return batch" }, { "identifier": "Timer", "path": "octo/utils/train_utils.py", "snippet": "class Timer:\n \"\"\"\n Timer utility. Usage:\n\n timer = Timer()\n with timer(\"foo\"):\n do_something()\n\n timer.tick(\"bar\")\n do_something_else()\n timer.tock(\"bar\")\n\n timer.get_average_times() -> {\"foo\": 0.1, \"bar\": 0.2}\n \"\"\"\n\n def __init__(self):\n self.reset()\n\n @contextmanager\n def __call__(self, key):\n self.tick(key)\n try:\n yield None\n finally:\n self.tock(key)\n\n def reset(self):\n self.counts = defaultdict(int)\n self.times = defaultdict(float)\n self.start_times = {}\n\n def tick(self, key):\n if key in self.start_times:\n raise ValueError(f\"Timer is already ticking for key: {key}\")\n self.start_times[key] = time.time()\n\n def tock(self, key):\n if key not in self.start_times:\n raise ValueError(f\"Timer is not ticking for key: {key}\")\n self.counts[key] += 1\n self.times[key] += time.time() - self.start_times[key]\n del self.start_times[key]\n\n def get_average_times(self, reset=True):\n ret = {key: self.times[key] / self.counts[key] for key in self.counts}\n if reset:\n self.reset()\n return ret" }, { "identifier": "TrainState", "path": "octo/utils/train_utils.py", "snippet": "class TrainState:\n rng: PRNGKey\n model: OctoModel\n step: int\n opt_state: optax.OptState\n tx: optax.GradientTransformation = struct.field(pytree_node=False)\n\n @classmethod\n def create(\n cls,\n rng: PRNGKey,\n model: OctoModel,\n tx: optax.GradientTransformation,\n ):\n opt_state = tx.init(model.params)\n return cls(\n rng=rng,\n model=model,\n step=0,\n opt_state=opt_state,\n tx=tx,\n )\n\n def apply_gradients(self, *, grads, rng):\n updates, new_opt_state = self.tx.update(\n grads, self.opt_state, self.model.params\n )\n new_params = optax.apply_updates(self.model.params, updates)\n\n return self.replace(\n step=self.step + 1,\n model=self.model.replace(params=new_params),\n opt_state=new_opt_state,\n rng=rng,\n )" } ]
import datetime
import imp
import os
import flax
import jax
import optax
import tensorflow as tf
import tqdm
import wandb
from functools import partial
from absl import app, flags, logging
from flax.traverse_util import flatten_dict
from jax.sharding import Mesh, NamedSharding, PartitionSpec
from ml_collections import config_flags, ConfigDict
from octo.data.dataset import make_single_dataset
from octo.model.octo_model import OctoModel
from octo.utils.jax_utils import initialize_compilation_cache
from octo.utils.spec import ModuleSpec
from octo.utils.train_callbacks import (
    RolloutVisualizationCallback,
    SaveCallback,
    ValidationCallback,
    VisualizationCallback,
)
from octo.utils.train_utils import (
    check_config_diff,
    create_optimizer,
    format_name_with_config,
    merge_params,
    process_text,
    Timer,
    TrainState,
)
from jax_smi import initialise_tracking  # type: ignore
10,530
), f"Batch size ({FLAGS.config.batch_size}) must be divisible by the number of devices ({len(devices)})" assert ( FLAGS.config.viz_kwargs.eval_batch_size % len(devices) == 0 ), f"Eval batch size ({FLAGS.config.viz_kwargs.eval_batch_size}) must be divisible by the number of devices ({len(devices)})" # create a 1D mesh with a single axis named "batch" mesh = Mesh(jax.devices(), axis_names="batch") # Our batches will be data-parallel sharded -- each device will get a slice of the batch dp_sharding = NamedSharding(mesh, PartitionSpec("batch")) # Our model will be replicated across devices (we are only doing data parallelism, not model parallelism) replicated_sharding = NamedSharding(mesh, PartitionSpec()) # prevent tensorflow from using GPU memory since it's only used for data loading tf.config.set_visible_devices([], "GPU") ######### # # Setup WandB # ######### name = format_name_with_config( FLAGS.name, FLAGS.config.to_dict(), ) wandb_id = "{name}_{time}".format( name=name, time=datetime.datetime.now().strftime("%Y%m%d_%H%M%S"), ) wandb.init( config=FLAGS.config.to_dict(), id=wandb_id, name=name, mode="disabled" if FLAGS.debug else None, **FLAGS.config.wandb, ) ######### # # Load Pretrained model + optionally modify config # ######### pretrained_model = OctoModel.load_pretrained( FLAGS.config.pretrained_path, step=FLAGS.config.pretrained_step, ) flat_config = flax.traverse_util.flatten_dict( pretrained_model.config, keep_empty_nodes=True ) for d_key in flax.traverse_util.flatten_dict( FLAGS.config.get("config_delete_keys", ConfigDict()).to_dict() ): for c_key in list(flat_config.keys()): if ".".join(c_key).startswith(".".join(d_key)): del flat_config[c_key] config = ConfigDict(flax.traverse_util.unflatten_dict(flat_config)) config.update(FLAGS.config.get("update_config", ConfigDict())) config = config.to_dict() check_config_diff(config, pretrained_model.config) ######### # # Setup Data Loader # ######### # create text processor if config["text_processor"] is None: text_processor = None else: text_processor = ModuleSpec.instantiate(config["text_processor"])() def process_batch(batch): batch = process_text(batch, text_processor) del batch["dataset_name"] return batch # load standardize_fn from `path/to/file.py:fn_name` format if ( standardize_fn := FLAGS.config["dataset_kwargs"].get("standardize_fn", None) ) is not None: path, name = standardize_fn.split(":") # imp is deprecated, but it's also what ml_collections uses standardize_fn = getattr(imp.load_source("standardize_fn", path), name) del FLAGS.config["dataset_kwargs"]["standardize_fn"] FLAGS.config["dataset_kwargs"]["standardize_fn"] = standardize_fn dataset = make_single_dataset( FLAGS.config.dataset_kwargs, traj_transform_kwargs=FLAGS.config.traj_transform_kwargs, frame_transform_kwargs=FLAGS.config.frame_transform_kwargs, train=True, ) train_data_iter = ( dataset.repeat() .unbatch() .shuffle(FLAGS.config.shuffle_buffer_size) .batch(FLAGS.config.batch_size) .iterator() ) train_data_iter = map(process_batch, train_data_iter) example_batch = next(train_data_iter) ######### # # Load Pretrained Model # ######### rng = jax.random.PRNGKey(FLAGS.config.seed) rng, init_rng = jax.random.split(rng) model = OctoModel.from_config( config, example_batch, text_processor, rng=init_rng, dataset_statistics=dataset.dataset_statistics, )
try: initialise_tracking() except ImportError: pass FLAGS = flags.FLAGS flags.DEFINE_string("name", "experiment", "Experiment name.") flags.DEFINE_bool("debug", False, "Debug config (no wandb logging)") default_config_file = os.path.join( os.path.dirname(__file__), "configs/finetune_config.py" ) config_flags.DEFINE_config_file( "config", default_config_file, "File path to the training hyperparameter configuration.", lock_config=False, ) def main(_): initialize_compilation_cache() devices = jax.devices() logging.info( f""" Octo Finetuning Script ====================== Pretrained model: {FLAGS.config.pretrained_path} Finetuning Dataset: {FLAGS.config.dataset_kwargs.name} Data dir: {FLAGS.config.dataset_kwargs.data_dir} Task Modality: {FLAGS.config.modality} Finetuning Mode: {FLAGS.config.finetuning_mode} # Devices: {jax.device_count()} Batch size: {FLAGS.config.batch_size} ({FLAGS.config.batch_size // len(devices) } per device) # Steps: {FLAGS.config.num_steps} """ ) ######### # # Setup Jax Data Parallelism # ######### assert ( FLAGS.config.batch_size % len(devices) == 0 ), f"Batch size ({FLAGS.config.batch_size}) must be divisible by the number of devices ({len(devices)})" assert ( FLAGS.config.viz_kwargs.eval_batch_size % len(devices) == 0 ), f"Eval batch size ({FLAGS.config.viz_kwargs.eval_batch_size}) must be divisible by the number of devices ({len(devices)})" # create a 1D mesh with a single axis named "batch" mesh = Mesh(jax.devices(), axis_names="batch") # Our batches will be data-parallel sharded -- each device will get a slice of the batch dp_sharding = NamedSharding(mesh, PartitionSpec("batch")) # Our model will be replicated across devices (we are only doing data parallelism, not model parallelism) replicated_sharding = NamedSharding(mesh, PartitionSpec()) # prevent tensorflow from using GPU memory since it's only used for data loading tf.config.set_visible_devices([], "GPU") ######### # # Setup WandB # ######### name = format_name_with_config( FLAGS.name, FLAGS.config.to_dict(), ) wandb_id = "{name}_{time}".format( name=name, time=datetime.datetime.now().strftime("%Y%m%d_%H%M%S"), ) wandb.init( config=FLAGS.config.to_dict(), id=wandb_id, name=name, mode="disabled" if FLAGS.debug else None, **FLAGS.config.wandb, ) ######### # # Load Pretrained model + optionally modify config # ######### pretrained_model = OctoModel.load_pretrained( FLAGS.config.pretrained_path, step=FLAGS.config.pretrained_step, ) flat_config = flax.traverse_util.flatten_dict( pretrained_model.config, keep_empty_nodes=True ) for d_key in flax.traverse_util.flatten_dict( FLAGS.config.get("config_delete_keys", ConfigDict()).to_dict() ): for c_key in list(flat_config.keys()): if ".".join(c_key).startswith(".".join(d_key)): del flat_config[c_key] config = ConfigDict(flax.traverse_util.unflatten_dict(flat_config)) config.update(FLAGS.config.get("update_config", ConfigDict())) config = config.to_dict() check_config_diff(config, pretrained_model.config) ######### # # Setup Data Loader # ######### # create text processor if config["text_processor"] is None: text_processor = None else: text_processor = ModuleSpec.instantiate(config["text_processor"])() def process_batch(batch): batch = process_text(batch, text_processor) del batch["dataset_name"] return batch # load standardize_fn from `path/to/file.py:fn_name` format if ( standardize_fn := FLAGS.config["dataset_kwargs"].get("standardize_fn", None) ) is not None: path, name = standardize_fn.split(":") # imp is deprecated, but it's also what ml_collections uses 
standardize_fn = getattr(imp.load_source("standardize_fn", path), name) del FLAGS.config["dataset_kwargs"]["standardize_fn"] FLAGS.config["dataset_kwargs"]["standardize_fn"] = standardize_fn dataset = make_single_dataset( FLAGS.config.dataset_kwargs, traj_transform_kwargs=FLAGS.config.traj_transform_kwargs, frame_transform_kwargs=FLAGS.config.frame_transform_kwargs, train=True, ) train_data_iter = ( dataset.repeat() .unbatch() .shuffle(FLAGS.config.shuffle_buffer_size) .batch(FLAGS.config.batch_size) .iterator() ) train_data_iter = map(process_batch, train_data_iter) example_batch = next(train_data_iter) ######### # # Load Pretrained Model # ######### rng = jax.random.PRNGKey(FLAGS.config.seed) rng, init_rng = jax.random.split(rng) model = OctoModel.from_config( config, example_batch, text_processor, rng=init_rng, dataset_statistics=dataset.dataset_statistics, )
merged_params = merge_params(model.params, pretrained_model.params)
11
2023-12-13 09:58:56+00:00
12k
LinShan-Bin/OccNeRF
networks/occupancy_decoder.py
[ { "identifier": "geom", "path": "utils/geom.py", "snippet": "def eye_4x4(B, device='cuda'):\ndef safe_inverse(a): #parallel version\ndef safe_inverse_single(a):\ndef apply_4x4(RT, xyz):\ndef get_camM_T_camXs(origin_T_camXs, ind=0):\ndef split_rt_single(rt):\ndef split_rt(rt):\ndef merge_rt(r, t):\ndef xyd2pointcloud(xyd, pix_T_cam):\ndef pixels2camera(x, y, z, fx, fy, x0, y0):\ndef camera2pixels(xyz, pix_T_cam):\ndef scale_intrinsics(K, sx, sy):\ndef split_intrinsics(K):\ndef merge_intrinsics(fx, fy, x0, y0):\ndef merge_rtlist(rlist, tlist):\ndef split_lrtlist(lrtlist):\ndef merge_lrtlist(lenlist, rtlist):\ndef apply_4x4_to_lrtlist(Y_T_X, lrtlist_X):\ndef apply_4x4_to_lrt(Y_T_X, lrt_X):\ndef get_xyzlist_from_lenlist(lenlist):\ndef get_xyzlist_from_lrtlist(lrtlist, include_clist=False):\ndef get_clist_from_lrtlist(lrtlist):\ndef wrap2pi(rad_angle):\ndef unproject(cam2world, intrinsic, depth):\ndef reproject(cam2world_src, cam2world_tar, W, H, intrinsic, depth_src, depth_tar, color_tar, mask_tar):\n def make_grid(x, y):\ndef visualize_depth(depth, mask=None, depth_min=None, depth_max=None, direct=False):\ndef mat2pose_vec(matrix: torch.Tensor):\ndef square_distance(src, dst):\n B, _, _ = list(a.shape)\n B, N, _ = list(xyz.shape)\n B, S = list(origin_T_camXs.shape)[0:2]\n B, C, D = list(r.shape)\n B2, D2 = list(t.shape)\n B, N, C = list(xyd.shape)\n B = x.shape[0]\n B = list(z.shape)[0]\n EPS = 1e-4\n K = merge_intrinsics(fx, fy, x0, y0)\n B = list(fx.shape)[0]\n K = torch.zeros(B, 4, 4, dtype=torch.float32, device=fx.device)\n K[:,0,0] = fx\n K[:,1,1] = fy\n K[:,0,2] = x0\n K[:,1,2] = y0\n K[:,2,2] = 1.0\n K[:,3,3] = 1.0\n B, N, D, E = list(rlist.shape)\n B, N, F = list(tlist.shape)\n B, N, D = list(lrtlist.shape)\n B, N, D = list(lenlist.shape)\n B2, N2, E, F = list(rtlist.shape)\n B, N, D = list(lrtlist_X.shape)\n B2, E, F = list(Y_T_X.shape)\n B, D = list(lrt_X.shape)\n B2, E, F = list(Y_T_X.shape)\n B, N, D = list(lenlist.shape)\n B, N, D = list(lrtlist.shape)\n B, N, D = list(lrtlist.shape)\n B, N, _ = src.shape\n _, M, _ = dst.shape" }, { "identifier": "vox", "path": "utils/vox.py", "snippet": "def world2contracted(xyz_world, pc_range_roi=[-52, -52, 0, 52, 52, 6], ratio=0.8):\ndef contracted2world(xyz_contracted, pc_range_roi=[-80, -80, -3, 80, 80, 8], ratio=0.8):\n def __init__(self, Z, Y, X, scene_centroid, bounds, position = 'embedding', length_pose_encoding = 3, opt = None, pad=None, assert_cube=False):\n def Ref2Mem(self, xyz, Z, Y, X, assert_cube=False):\n def Mem2Ref(self, xyz_mem, Z, Y, X, assert_cube=False):\n def get_mem_T_ref(self, B, Z, Y, X, assert_cube=False, device='cuda'):\n def get_ref_T_mem(self, B, Z, Y, X, assert_cube=False, device='cuda'):\n def get_inbounds(self, xyz, Z, Y, X, already_mem=False, padding=0.0, assert_cube=False):\n def voxelize_xyz(self, xyz_ref, Z, Y, X, already_mem=False, assert_cube=False, clean_eps=0):\n def voxelize_xyz_and_feats(self, xyz_ref, feats, Z, Y, X, already_mem=False, assert_cube=False, clean_eps=0):\n def get_occupancy(self, xyz, Z, Y, X, clean_eps=0, xyz_zero=None):\n def get_feat_occupancy(self, xyz, feat, Z, Y, X, clean_eps=0, xyz_zero=None):\n def unproject_image_to_mem(self, rgb_camB, pixB_T_camA, camB_T_camA, Z, Y, X, assert_cube=False):\n def get_meta_data(self, cam_center, camB_T_camA = None, abs_position=False, assert_cube=False):\n def get_voxel_position(self, cam_center, abs_position=True, assert_cube=False):\n def apply_mem_T_ref_to_lrtlist(self, lrtlist_cam, Z, Y, X, assert_cube=False):\nclass Vox_util(nn.Module):\n 
B, N, C = list(xyz.shape)\n B, N, C = list(xyz_mem.shape)\n B, N, D = list(xyz_ref.shape)\n B, N, D = list(xyz_ref.shape)\n B2, N2, D2 = list(feats.shape)\n B, N, C = list(xyz.shape)\n B, N, C = list(xyz.shape)\n B2, N2, D2 = list(feat.shape)\n B, C, H, W = list(rgb_camB.shape)\n EPS=1e-6\n Z, Y, X = self.Z, self.Y, self.X\n Z, Y, X = self.Z, self.Y, self.X\n B, N, C = list(lrtlist_cam.shape)" }, { "identifier": "basic", "path": "utils/basic.py", "snippet": "EPS = 1e-6\n B_, S = shapelist[:2]\n BS = shapelist[0]\n S = int(BS/B)\ndef strnum(x):\ndef matmul2(mat1, mat2):\ndef pack_seqdim(tensor, B):\ndef unpack_seqdim(tensor, B):\ndef reduce_masked_mean(x, mask, dim=None, keepdim=False):\ndef meshgrid3d(B, Z, Y, X, stack=False, norm=False, device='cuda'):\ndef gridcloud3d(B, Z, Y, X, norm=False, device='cuda'):\ndef normalize_grid2d(grid_y, grid_x, Y, X, clamp_extreme=True):" }, { "identifier": "render", "path": "utils/render.py", "snippet": "def get_rays(H, W, K, c2w, inverse_y, flip_x, flip_y, mode='center'):\ndef ndc_rays(H, W, focal, near, rays_o, rays_d):\ndef get_rays_of_a_view(H, W, K, c2w, ndc, inverse_y, flip_x, flip_y, mode='center'):\ndef cumprod_exclusive(p):\ndef get_ray_marching_ray(alpha):\ndef sample_ray(self, rays_o, rays_d, near, far, stepsize, xyz_min, xyz_max, voxel_size, is_train=False):\n def __init__(self, init_val, beta_min=0.0001):\n def forward(self, sdf, beta=None):\n def get_beta(self):\n def __init__(self, init_val, beta_min=0.0001):\n def forward(self, sdf, beta=None):\n def get_beta(self):\n def __init__(self, init_val):\n def forward(self, x):\n def get_variance(self):\nclass SigmoidDensity(nn.Module): # alpha * Laplace(loc=0, scale=beta).cdf(-sdf)\nclass LaplaceDensity(nn.Module): # alpha * Laplace(loc=0, scale=beta).cdf(-sdf)\nclass SingleVarianceNetwork(nn.Module):" }, { "identifier": "S3DCNN", "path": "networks/_3DCNN.py", "snippet": "class S3DCNN(nn.Module):\n def __init__(self, input_planes = 64, out_planes = 1, planes = 16, conv_3d_types1 = \"3D\", activate_fun = nn.ReLU(inplace=True), opt = None):\n super(S3DCNN, self).__init__()\n self.out_planes = out_planes\n\n self.opt = opt\n self.dres0 = nn.Sequential(convbn_3d(input_planes, planes*2, 3, 1, 1, conv_3d_types = conv_3d_types1),\n activate_fun,\n convbn_3d(planes*2, planes*2, 3, 1, 1, conv_3d_types = conv_3d_types1),\n activate_fun)\n\n\n self.dres1 = nn.Sequential(convbn_3d(planes*2, planes*2, 3, 1, 1, conv_3d_types = conv_3d_types1),\n activate_fun,\n convbn_3d(planes*2, planes*2, 3, 1, 1, conv_3d_types = conv_3d_types1))\n\n self.dres2 = hourglass_PSMNet(planes*2, conv_3d_types1 = conv_3d_types1, activate_fun = activate_fun)\n\n self.dres3 = hourglass_PSMNet(planes*2, conv_3d_types1 = conv_3d_types1, activate_fun = activate_fun)\n\n self.dres4 = hourglass_PSMNet(planes*2, conv_3d_types1 = conv_3d_types1, activate_fun = activate_fun)\n\n\n self.classif1 = nn.Sequential(convbn_3d(planes*2, planes*2, 3, 1, 1, conv_3d_types = conv_3d_types1),\n activate_fun,\n nn.Conv3d(planes*2, out_planes, kernel_size=3, padding=1, stride=1,bias=False))\n\n self.classif2 = nn.Sequential(convbn_3d(planes*2, planes*2, 3, 1, 1, conv_3d_types = conv_3d_types1),\n activate_fun,\n nn.Conv3d(planes*2, out_planes, kernel_size=3, padding=1, stride=1,bias=False))\n\n\n\n self.classif3 = nn.Sequential(convbn_3d(planes * 2, planes * 2, 3, 1, 1, conv_3d_types=conv_3d_types1),\n activate_fun,\n\n convbn_3d(planes * 2, planes * 2, 3, 1, 1, conv_3d_types=conv_3d_types1),\n activate_fun,\n\n convbn_3d(planes * 2, planes * 2, 3, 1, 
1, conv_3d_types=conv_3d_types1),\n activate_fun,\n\n convbn_3d(planes * 2, planes * 2, 3, 1, 1, conv_3d_types=conv_3d_types1),\n activate_fun,\n\n convbn_3d(planes * 2, self.out_planes, 3, 1, 1, conv_3d_types=conv_3d_types1),)\n if self.opt.use_semantic:\n self.classif_semantic = nn.Sequential(convbn_3d(planes * 2, planes * 2, 3, 1, 1, conv_3d_types=conv_3d_types1),\n activate_fun,\n\n convbn_3d(planes * 2, planes * 2, 3, 1, 1, conv_3d_types=conv_3d_types1),\n activate_fun,\n\n convbn_3d(planes * 2, planes * 2, 3, 1, 1, conv_3d_types=conv_3d_types1),\n activate_fun,\n\n convbn_3d(planes * 2, planes * 2, 3, 1, 1, conv_3d_types=conv_3d_types1),\n activate_fun,\n\n convbn_3d(planes * 2, self.opt.semantic_classes, 3, 1, 1, conv_3d_types=conv_3d_types1),)\n\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.Conv3d):\n n = m.kernel_size[0] * m.kernel_size[1]*m.kernel_size[2] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm3d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.bias.data.zero_()\n\n if self.opt.render_type == 'density':\n pass\n \n def geo_param(self):\n return list(self.dres0.parameters()) + \\\n list(self.dres1.parameters()) + \\\n list(self.dres2.parameters()) + \\\n list(self.dres3.parameters()) + \\\n list(self.dres4.parameters()) + \\\n list(self.classif1.parameters()) + \\\n list(self.classif2.parameters()) + \\\n list(self.classif3.parameters())\n \n def sem_head_param(self):\n if self.opt.use_semantic:\n return self.classif_semantic.parameters()\n else:\n return None\n\n def forward(self, cost):\n\n cost0 = self.dres0(cost)\n cost0 = self.dres1(cost0) + cost0\n\n out1, pre1, post1 = self.dres2(cost0, None, None)\n\n out1 = out1+cost0\n\n out2, pre2, post2 = self.dres3(out1, pre1, post1)\n out2 = out2+cost0\n\n out3, pre3, post3 = self.dres4(out2, pre1, post2)\n\n if self.opt.use_semantic:\n if self.opt.last_free:\n out = self.classif_semantic(out3)\n else:\n semantic = self.classif_semantic(out3)\n cost3 = self.classif3(out3)\n out = torch.cat([semantic, cost3], dim=1)\n return [out]\n else:\n cost3 = self.classif3(out3)\n return [cost3]" } ]
import pdb
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch_efficient_distloss import eff_distloss, eff_distloss_native
from utils import geom
from utils import vox
from utils import basic
from utils import render
from ._3DCNN import S3DCNN
7,266
group_num = xyz.shape[1] // group_size xyz_sem = xyz[:, :group_size * group_num].reshape(xyz.shape[0], group_num, group_size, 3).mean(dim=2) else: xyz_sem = None if avail_mask is not None: if self.opt.contracted_coord: ind_norm = self.norm_func(xyz) avail_mask = self.effective_points_mask(ind_norm) ind_norm = ind_norm[avail_mask] if xyz_sem is not None: avail_mask_sem = avail_mask[:, :group_size * group_num].reshape(avail_mask.shape[0], group_num, group_size).any(dim=-1) ind_norm_sem = self.norm_func(xyz_sem[avail_mask_sem]) else: xyz_masked = xyz[avail_mask] ind_norm = self.norm_func(xyz_masked) if xyz_sem is not None: avail_mask_sem = avail_mask[:, :group_size * group_num].reshape(avail_mask.shape[0], group_num, group_size).any(dim=-1) ind_norm_sem = self.norm_func(xyz_sem[avail_mask_sem]) else: ind_norm = self.norm_func(xyz) if xyz_sem is not None: ind_norm_sem = self.norm_func(xyz_sem) avail_mask_sem = None ind_norm = ind_norm.flip((-1,)) # value range: [-1, 1] shape = ind_norm.shape[:-1] ind_norm = ind_norm.reshape(1, 1, 1, -1, 3) if xyz_sem is None: grid = grids[0] # BCXYZ # torch.Size([1, C, 256, 256, 16]) ret_lst = F.grid_sample(grid, ind_norm, mode='bilinear', align_corners=align_corners).reshape(grid.shape[1], -1).T.reshape(*shape, grid.shape[1]) if self.use_semantic: semantic, feats = ret_lst[..., :self.semantic_classes], ret_lst[..., -1] return feats, avail_mask, semantic else: return ret_lst.squeeze(), avail_mask else: ind_norm_sem = ind_norm_sem.flip((-1,)) shape_sem = ind_norm_sem.shape[:-1] ind_norm_sem = ind_norm_sem.reshape(1, 1, 1, -1, 3) grid_sem = grids[0][:, :self.semantic_classes] # BCXYZ # torch.Size([1, semantic_classes, H, W, Z]) grid_geo = grids[0][:, -1:] # BCXYZ # torch.Size([1, 1, H, W, Z]) ret_sem = F.grid_sample(grid_sem, ind_norm_sem, mode='bilinear', align_corners=align_corners).reshape(grid_sem.shape[1], -1).T.reshape(*shape_sem, grid_sem.shape[1]) ret_geo = F.grid_sample(grid_geo, ind_norm, mode='bilinear', align_corners=align_corners).reshape(grid_geo.shape[1], -1).T.reshape(*shape, grid_geo.shape[1]) return ret_geo.squeeze(), avail_mask, ret_sem, avail_mask_sem, group_num, group_size def sample_ray(self, rays_o, rays_d, is_train): '''Sample query points on rays''' Zval = self.Zval.to(rays_o) if is_train: Zval = Zval.repeat(rays_d.shape[-2], 1) Zval += (torch.rand_like(Zval[:, [0]]) * 0.2 - 0.1) * self.stepsize_log * self.voxel_size Zval = Zval.clamp(min=0.0) Zval = Zval + self.near rays_pts = rays_o[..., None, :] + rays_d[..., None, :] * Zval[..., None] rays_pts_depth = (rays_o[..., None, :] - rays_pts).norm(dim=-1) if self.opt.contracted_coord: # contracted coordiante has infinite perception range mask_outbbox = torch.zeros_like(rays_pts[..., 0]).bool() else: mask_outbbox = ((self.xyz_min > rays_pts) | (rays_pts > self.xyz_max)).any(dim=-1) return rays_pts, mask_outbbox, Zval, rays_pts_depth def effective_points_mask(self, points): '''Mask out points that are too close to each other in the contracted coordinate''' dist = torch.diff(points, dim=-2, prepend=torch.zeros_like(points[..., :1, :])).abs() xyz_thresh = 0.4 / torch.tensor([self.X, self.Y, self.Z]).to(points) mask = (dist > xyz_thresh).bool().any(dim=-1) return mask def activate_density(self, density, dists): return 1 - torch.exp(-F.relu(density) * dists) def get_density(self, rays_o, rays_d, Voxel_feat, is_train, inputs): dtype = torch.float16 if self.opt.use_fp16 else torch.float32 device = rays_o.device rays_o, rays_d, Voxel_feat = rays_o.to(dtype), rays_d.to(dtype), Voxel_feat.to(dtype) 
reg_loss = {} eps_time = time.time() with torch.no_grad(): rays_o_i = rays_o[0, ...].flatten(0, 2) # HXWX3 rays_d_i = rays_d[0, ...].flatten(0, 2) # HXWX3 rays_pts, mask_outbbox, z_vals, rays_pts_depth = self.sample_ray(rays_o_i, rays_d_i, is_train=is_train) dists = rays_pts_depth[..., 1:] - rays_pts_depth[..., :-1] # [num pixels, num points - 1] dists = torch.cat([dists, 1e4 * torch.ones_like(dists[..., :1])], dim=-1) # [num pixels, num points] sample_ret = self.grid_sampler(rays_pts, Voxel_feat, avail_mask=~mask_outbbox) if self.use_semantic: if self.opt.semantic_sample_ratio < 1.0: geo_feats, mask, semantic, mask_sem, group_num, group_size = sample_ret else: geo_feats, mask, semantic = sample_ret else: geo_feats, mask = sample_ret if self.opt.render_type == 'prob': weights = torch.zeros_like(rays_pts[..., 0]) weights[:, -1] = 1 geo_feats = torch.sigmoid(geo_feats) if self.opt.last_free: geo_feats = 1.0 - geo_feats # the last channel is the probability of being free weights[mask] = geo_feats # accumulate weights = weights.cumsum(dim=1).clamp(max=1) alphainv_fin = weights[..., -1] weights = weights.diff(dim=1, prepend=torch.zeros((rays_pts.shape[:1])).unsqueeze(1).to(device=device, dtype=dtype)) depth = (weights * z_vals).sum(-1) rgb_marched = 0 elif self.opt.render_type == 'density': alpha = torch.zeros_like(rays_pts[..., 0]) # [num pixels, num points] alpha[mask] = self.activate_density(geo_feats, dists[mask])
# Copyright Niantic 2019. Patent Pending. All rights reserved. # # This software is licensed under the terms of the Monodepth2 licence # which allows for non-commercial use only, the full terms of which are made # available in the LICENSE file. from __future__ import absolute_import, division, print_function class VolumeDecoder(nn.Module): def __init__(self, opt): super(VolumeDecoder, self).__init__() self.opt = opt self.use_semantic = self.opt.use_semantic self.semantic_classes = self.opt.semantic_classes self.batch = self.opt.batch_size // self.opt.cam_N self.near = self.opt.min_depth self.far = self.opt.max_depth self.register_buffer('xyz_min', torch.from_numpy( np.array([self.opt.real_size[0], self.opt.real_size[2], self.opt.real_size[4]]))) self.register_buffer('xyz_max', torch.from_numpy( np.array([self.opt.real_size[1], self.opt.real_size[3], self.opt.real_size[5]]))) self.ZMAX = self.opt.real_size[1] self.Z = self.opt.voxels_size[0] self.Y = self.opt.voxels_size[1] self.X = self.opt.voxels_size[2] self.Z_final = self.Z self.Y_final = self.Y self.X_final = self.X self.stepsize = self.opt.stepsize # voxel self.num_voxels = self.Z_final * self.Y_final * self.X_final self.stepsize_log = self.stepsize self.interval = self.stepsize if self.opt.contracted_coord: # Sampling strategy for contracted coordinate contracted_rate = self.opt.contracted_ratio num_id_voxels = int(self.num_voxels * (contracted_rate)**3) self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / num_id_voxels).pow(1 / 3) diagonal = (self.xyz_max - self.xyz_min).pow(2).sum().pow(1 / 2) self.N_samples = int(diagonal / 2 / self.stepsize / self.voxel_size / contracted_rate) if self.opt.infinite_range: # depth_roi = [-self.far] * 3 + [self.far] * 3 zval_roi = [-diagonal] * 3 + [diagonal] * 3 fc = 1 - 0.5 / self.X # avoid NaN zs_contracted = torch.linspace(0.0, fc, steps=self.N_samples) zs_world = vox.contracted2world( zs_contracted[None, :, None].repeat(1, 1, 3), # pc_range_roi=depth_roi, pc_range_roi=zval_roi, ratio=self.opt.contracted_ratio)[:, :, 0] else: zs_world = torch.linspace(0.0, self.N_samples - 1, steps=self.N_samples)[None] * self.stepsize * self.voxel_size self.register_buffer('Zval', zs_world) pc_range_roi = self.xyz_min.tolist() + self.xyz_max.tolist() self.norm_func = lambda xyz: vox.world2contracted(xyz, pc_range_roi=pc_range_roi, ratio=self.opt.contracted_ratio) else: self.N_samples = int(np.linalg.norm(np.array([self.Z_final // 2, self.Y_final // 2, self.X_final // 2]) + 1) / self.stepsize) + 1 self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / self.num_voxels).pow(1 / 3) zs_world = torch.linspace(0.0, self.N_samples - 1, steps=self.N_samples)[None] * self.stepsize * self.voxel_size self.register_buffer('Zval', zs_world) self.norm_func = lambda xyz: (xyz - self.xyz_min.to(xyz)) / (self.xyz_max.to(xyz) - self.xyz_min.to(xyz)) * 2.0 - 1.0 length_pose_encoding = 3 if self.opt.position == 'embedding': input_channel = self.opt.input_channel self.pos_embedding = torch.nn.Parameter(torch.ones( [1, input_channel, self.opt.voxels_size[1], self.opt.voxels_size[2], self.opt.voxels_size[0]])) elif self.opt.position == 'embedding1': input_channel = self.opt.input_channel xyz_in_channels = 1 + 3 embedding_width = 192 embedding_depth = 5 self.embeddingnet = nn.Sequential( nn.Linear(xyz_in_channels, embedding_width), nn.ReLU(inplace=True), *[nn.Sequential(nn.Linear(embedding_width, embedding_width), nn.ReLU(inplace=True)) for _ in range(embedding_depth - 2)], nn.Linear(embedding_width, self.opt.input_channel),) 
nn.init.constant_(self.embeddingnet[-1].bias, 0) self.pos_embedding1 = None self.pos_embedding_save = torch.nn.Parameter(torch.zeros([1, input_channel, self.opt.voxels_size[1], self.opt.voxels_size[2], self.opt.voxels_size[0]]), requires_grad= False) else: self.pos_embedding = None self.pos_embedding1 = None input_channel = self.opt.input_channel scene_centroid_x = 0.0 scene_centroid_y = 0.0 scene_centroid_z = 0.0 scene_centroid = np.array([scene_centroid_x, scene_centroid_y, scene_centroid_z]).reshape([1, 3]) self.register_buffer('scene_centroid', torch.from_numpy(scene_centroid).float()) self.bounds = (self.opt.real_size[0], self.opt.real_size[1], self.opt.real_size[2], self.opt.real_size[3], self.opt.real_size[4], self.opt.real_size[5]) # bounds = (-40, 40, -40, 40, -1, 5.4) self.vox_util = vox.Vox_util( self.Z, self.Y, self.X, scene_centroid=self.scene_centroid, bounds=self.bounds, position = self.opt.position, length_pose_encoding = length_pose_encoding, opt = self.opt, assert_cube=False) if self.opt.position != 'No' and self.opt.position != 'embedding': self.meta_data = self.vox_util.get_meta_data(cam_center=torch.Tensor([[1.2475, 0.0673, 1.5356]]), camB_T_camA=None).to('cuda') activate_fun = nn.ReLU(inplace=True) if self.opt.aggregation == '3dcnn': out_channel = self.opt.out_channel self._3DCNN = S3DCNN(input_planes=input_channel, out_planes=out_channel, planes=self.opt.con_channel, activate_fun=activate_fun, opt=opt) else: print('please define the aggregation') exit() def feature2vox_simple(self, features, pix_T_cams, cam0_T_camXs, __p, __u): pix_T_cams_ = pix_T_cams camXs_T_cam0_ = geom.safe_inverse(cam0_T_camXs) _, C, Hf, Wf = features.shape sy = Hf / float(self.opt.height) sx = Wf / float(self.opt.width) # unproject image feature to 3d grid featpix_T_cams_ = geom.scale_intrinsics(pix_T_cams_, sx, sy) # pix_T_cams_ shape: [6,4,4] feature down sample -> featpix_T_cams_ feat_mems_ = self.vox_util.unproject_image_to_mem( features, basic.matmul2(featpix_T_cams_, camXs_T_cam0_), camXs_T_cam0_, self.Z, self.Y, self.X) # feat_mems_ shape: torch.Size([6, 128, 200, 8, 200]) feat_mems = __u(feat_mems_) # B, S, C, Z, Y, X # torch.Size([1, 6, 128, 200, 8, 200]) mask_mems = (torch.abs(feat_mems) > 0).float() feat_mem = basic.reduce_masked_mean(feat_mems, mask_mems, dim=1) # B, C, Z, Y, X feat_mem = feat_mem.permute(0, 1, 4, 3, 2) # [0, ...].unsqueeze(0) # ZYX -> XYZ return feat_mem def grid_sampler(self, xyz, *grids, align_corners=True, avail_mask=None, vis=False): '''Wrapper for the interp operation''' # pdb.set_trace() if self.opt.semantic_sample_ratio < 1.0 and self.use_semantic and not vis: group_size = int(1.0 / self.opt.semantic_sample_ratio) group_num = xyz.shape[1] // group_size xyz_sem = xyz[:, :group_size * group_num].reshape(xyz.shape[0], group_num, group_size, 3).mean(dim=2) else: xyz_sem = None if avail_mask is not None: if self.opt.contracted_coord: ind_norm = self.norm_func(xyz) avail_mask = self.effective_points_mask(ind_norm) ind_norm = ind_norm[avail_mask] if xyz_sem is not None: avail_mask_sem = avail_mask[:, :group_size * group_num].reshape(avail_mask.shape[0], group_num, group_size).any(dim=-1) ind_norm_sem = self.norm_func(xyz_sem[avail_mask_sem]) else: xyz_masked = xyz[avail_mask] ind_norm = self.norm_func(xyz_masked) if xyz_sem is not None: avail_mask_sem = avail_mask[:, :group_size * group_num].reshape(avail_mask.shape[0], group_num, group_size).any(dim=-1) ind_norm_sem = self.norm_func(xyz_sem[avail_mask_sem]) else: ind_norm = self.norm_func(xyz) if xyz_sem is not 
None: ind_norm_sem = self.norm_func(xyz_sem) avail_mask_sem = None ind_norm = ind_norm.flip((-1,)) # value range: [-1, 1] shape = ind_norm.shape[:-1] ind_norm = ind_norm.reshape(1, 1, 1, -1, 3) if xyz_sem is None: grid = grids[0] # BCXYZ # torch.Size([1, C, 256, 256, 16]) ret_lst = F.grid_sample(grid, ind_norm, mode='bilinear', align_corners=align_corners).reshape(grid.shape[1], -1).T.reshape(*shape, grid.shape[1]) if self.use_semantic: semantic, feats = ret_lst[..., :self.semantic_classes], ret_lst[..., -1] return feats, avail_mask, semantic else: return ret_lst.squeeze(), avail_mask else: ind_norm_sem = ind_norm_sem.flip((-1,)) shape_sem = ind_norm_sem.shape[:-1] ind_norm_sem = ind_norm_sem.reshape(1, 1, 1, -1, 3) grid_sem = grids[0][:, :self.semantic_classes] # BCXYZ # torch.Size([1, semantic_classes, H, W, Z]) grid_geo = grids[0][:, -1:] # BCXYZ # torch.Size([1, 1, H, W, Z]) ret_sem = F.grid_sample(grid_sem, ind_norm_sem, mode='bilinear', align_corners=align_corners).reshape(grid_sem.shape[1], -1).T.reshape(*shape_sem, grid_sem.shape[1]) ret_geo = F.grid_sample(grid_geo, ind_norm, mode='bilinear', align_corners=align_corners).reshape(grid_geo.shape[1], -1).T.reshape(*shape, grid_geo.shape[1]) return ret_geo.squeeze(), avail_mask, ret_sem, avail_mask_sem, group_num, group_size def sample_ray(self, rays_o, rays_d, is_train): '''Sample query points on rays''' Zval = self.Zval.to(rays_o) if is_train: Zval = Zval.repeat(rays_d.shape[-2], 1) Zval += (torch.rand_like(Zval[:, [0]]) * 0.2 - 0.1) * self.stepsize_log * self.voxel_size Zval = Zval.clamp(min=0.0) Zval = Zval + self.near rays_pts = rays_o[..., None, :] + rays_d[..., None, :] * Zval[..., None] rays_pts_depth = (rays_o[..., None, :] - rays_pts).norm(dim=-1) if self.opt.contracted_coord: # contracted coordiante has infinite perception range mask_outbbox = torch.zeros_like(rays_pts[..., 0]).bool() else: mask_outbbox = ((self.xyz_min > rays_pts) | (rays_pts > self.xyz_max)).any(dim=-1) return rays_pts, mask_outbbox, Zval, rays_pts_depth def effective_points_mask(self, points): '''Mask out points that are too close to each other in the contracted coordinate''' dist = torch.diff(points, dim=-2, prepend=torch.zeros_like(points[..., :1, :])).abs() xyz_thresh = 0.4 / torch.tensor([self.X, self.Y, self.Z]).to(points) mask = (dist > xyz_thresh).bool().any(dim=-1) return mask def activate_density(self, density, dists): return 1 - torch.exp(-F.relu(density) * dists) def get_density(self, rays_o, rays_d, Voxel_feat, is_train, inputs): dtype = torch.float16 if self.opt.use_fp16 else torch.float32 device = rays_o.device rays_o, rays_d, Voxel_feat = rays_o.to(dtype), rays_d.to(dtype), Voxel_feat.to(dtype) reg_loss = {} eps_time = time.time() with torch.no_grad(): rays_o_i = rays_o[0, ...].flatten(0, 2) # HXWX3 rays_d_i = rays_d[0, ...].flatten(0, 2) # HXWX3 rays_pts, mask_outbbox, z_vals, rays_pts_depth = self.sample_ray(rays_o_i, rays_d_i, is_train=is_train) dists = rays_pts_depth[..., 1:] - rays_pts_depth[..., :-1] # [num pixels, num points - 1] dists = torch.cat([dists, 1e4 * torch.ones_like(dists[..., :1])], dim=-1) # [num pixels, num points] sample_ret = self.grid_sampler(rays_pts, Voxel_feat, avail_mask=~mask_outbbox) if self.use_semantic: if self.opt.semantic_sample_ratio < 1.0: geo_feats, mask, semantic, mask_sem, group_num, group_size = sample_ret else: geo_feats, mask, semantic = sample_ret else: geo_feats, mask = sample_ret if self.opt.render_type == 'prob': weights = torch.zeros_like(rays_pts[..., 0]) weights[:, -1] = 1 geo_feats = 
torch.sigmoid(geo_feats) if self.opt.last_free: geo_feats = 1.0 - geo_feats # the last channel is the probability of being free weights[mask] = geo_feats # accumulate weights = weights.cumsum(dim=1).clamp(max=1) alphainv_fin = weights[..., -1] weights = weights.diff(dim=1, prepend=torch.zeros((rays_pts.shape[:1])).unsqueeze(1).to(device=device, dtype=dtype)) depth = (weights * z_vals).sum(-1) rgb_marched = 0 elif self.opt.render_type == 'density': alpha = torch.zeros_like(rays_pts[..., 0]) # [num pixels, num points] alpha[mask] = self.activate_density(geo_feats, dists[mask])
weights, alphainv_cum = render.get_ray_marching_ray(alpha)
3
2023-12-14 15:00:21+00:00
12k
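The code in the record above converts per-sample densities into opacities with activate_density, i.e. alpha = 1 - exp(-relu(density) * dist), and then accumulates them along each ray (its next_line calls render.get_ray_marching_ray, whose body is not shown here). The snippet below is a minimal, self-contained sketch of that standard alpha-compositing step; the helper name composite_from_density, the tensor shapes, and the cumulative-product transmittance are illustrative assumptions, not the repository's actual implementation.

# Hedged sketch of density-to-alpha compositing, independent of the repo's own renderer.
import torch
import torch.nn.functional as F

def composite_from_density(density, dists, z_vals):
    """density, dists, z_vals: [num_rays, num_points] tensors (hypothetical shapes)."""
    alpha = 1.0 - torch.exp(-F.relu(density) * dists)            # per-sample opacity
    trans = torch.cumprod(
        torch.cat([torch.ones_like(alpha[:, :1]), 1.0 - alpha + 1e-10], dim=-1), dim=-1
    )[:, :-1]                                                     # transmittance before each sample
    weights = alpha * trans                                       # contribution of each sample
    depth = (weights * z_vals).sum(-1)                            # expected depth along the ray
    return weights, depth

# Example: 2 rays with 4 samples each
density = torch.rand(2, 4)
dists = torch.full((2, 4), 0.5)
z_vals = torch.linspace(0.5, 2.0, 4).expand(2, 4)
weights, depth = composite_from_density(density, dists, z_vals)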
Kevin-thu/DiffMorpher
app.py
[ { "identifier": "DiffMorpherPipeline", "path": "model.py", "snippet": "class DiffMorpherPipeline(StableDiffusionPipeline):\n\n def __init__(self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet2DConditionModel,\n scheduler: KarrasDiffusionSchedulers,\n safety_checker: StableDiffusionSafetyChecker,\n feature_extractor: CLIPImageProcessor,\n requires_safety_checker: bool = True,\n ):\n super().__init__(vae, text_encoder, tokenizer, unet, scheduler,\n safety_checker, feature_extractor, requires_safety_checker)\n self.img0_dict = dict()\n self.img1_dict = dict()\n\n def inv_step(\n self,\n model_output: torch.FloatTensor,\n timestep: int,\n x: torch.FloatTensor,\n eta=0.,\n verbose=False\n ):\n \"\"\"\n Inverse sampling for DDIM Inversion\n \"\"\"\n if verbose:\n print(\"timestep: \", timestep)\n next_step = timestep\n timestep = min(timestep - self.scheduler.config.num_train_timesteps //\n self.scheduler.num_inference_steps, 999)\n alpha_prod_t = self.scheduler.alphas_cumprod[\n timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod\n alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step]\n beta_prod_t = 1 - alpha_prod_t\n pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5\n pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output\n x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir\n return x_next, pred_x0\n\n @torch.no_grad()\n def invert(\n self,\n image: torch.Tensor,\n prompt,\n num_inference_steps=50,\n num_actual_inference_steps=None,\n guidance_scale=1.,\n eta=0.0,\n **kwds):\n \"\"\"\n invert a real image into noise map with determinisc DDIM inversion\n \"\"\"\n DEVICE = torch.device(\n \"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n batch_size = image.shape[0]\n if isinstance(prompt, list):\n if batch_size == 1:\n image = image.expand(len(prompt), -1, -1, -1)\n elif isinstance(prompt, str):\n if batch_size > 1:\n prompt = [prompt] * batch_size\n\n # text embeddings\n text_input = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n text_embeddings = self.text_encoder(text_input.input_ids.to(DEVICE))[0]\n print(\"input text embeddings :\", text_embeddings.shape)\n # define initial latents\n latents = self.image2latent(image)\n\n # unconditional embedding for classifier free guidance\n if guidance_scale > 1.:\n max_length = text_input.input_ids.shape[-1]\n unconditional_input = self.tokenizer(\n [\"\"] * batch_size,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n unconditional_embeddings = self.text_encoder(\n unconditional_input.input_ids.to(DEVICE))[0]\n text_embeddings = torch.cat(\n [unconditional_embeddings, text_embeddings], dim=0)\n\n print(\"latents shape: \", latents.shape)\n # interative sampling\n self.scheduler.set_timesteps(num_inference_steps)\n print(\"Valid timesteps: \", reversed(self.scheduler.timesteps))\n # print(\"attributes: \", self.scheduler.__dict__)\n latents_list = [latents]\n pred_x0_list = [latents]\n for i, t in enumerate(tqdm.tqdm(reversed(self.scheduler.timesteps), desc=\"DDIM Inversion\")):\n if num_actual_inference_steps is not None and i >= num_actual_inference_steps:\n continue\n\n if guidance_scale > 1.:\n model_inputs = torch.cat([latents] * 2)\n else:\n model_inputs = latents\n\n # predict the noise\n noise_pred = self.unet(\n model_inputs, t, encoder_hidden_states=text_embeddings).sample\n if guidance_scale > 1.:\n noise_pred_uncon, noise_pred_con = noise_pred.chunk(2, 
dim=0)\n noise_pred = noise_pred_uncon + guidance_scale * \\\n (noise_pred_con - noise_pred_uncon)\n # compute the previous noise sample x_t-1 -> x_t\n latents, pred_x0 = self.inv_step(noise_pred, t, latents)\n latents_list.append(latents)\n pred_x0_list.append(pred_x0)\n\n return latents\n\n @torch.no_grad()\n def ddim_inversion(self, latent, cond):\n timesteps = reversed(self.scheduler.timesteps)\n with torch.autocast(device_type='cuda', dtype=torch.float32):\n for i, t in enumerate(tqdm.tqdm(timesteps, desc=\"DDIM inversion\")):\n cond_batch = cond.repeat(latent.shape[0], 1, 1)\n\n alpha_prod_t = self.scheduler.alphas_cumprod[t]\n alpha_prod_t_prev = (\n self.scheduler.alphas_cumprod[timesteps[i - 1]]\n if i > 0 else self.scheduler.final_alpha_cumprod\n )\n\n mu = alpha_prod_t ** 0.5\n mu_prev = alpha_prod_t_prev ** 0.5\n sigma = (1 - alpha_prod_t) ** 0.5\n sigma_prev = (1 - alpha_prod_t_prev) ** 0.5\n\n eps = self.unet(\n latent, t, encoder_hidden_states=cond_batch).sample\n\n pred_x0 = (latent - sigma_prev * eps) / mu_prev\n latent = mu * pred_x0 + sigma * eps\n # if save_latents:\n # torch.save(latent, os.path.join(save_path, f'noisy_latents_{t}.pt'))\n # torch.save(latent, os.path.join(save_path, f'noisy_latents_{t}.pt'))\n return latent\n\n def step(\n self,\n model_output: torch.FloatTensor,\n timestep: int,\n x: torch.FloatTensor,\n ):\n \"\"\"\n predict the sample of the next step in the denoise process.\n \"\"\"\n prev_timestep = timestep - \\\n self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps\n alpha_prod_t = self.scheduler.alphas_cumprod[timestep]\n alpha_prod_t_prev = self.scheduler.alphas_cumprod[\n prev_timestep] if prev_timestep > 0 else self.scheduler.final_alpha_cumprod\n beta_prod_t = 1 - alpha_prod_t\n pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5\n pred_dir = (1 - alpha_prod_t_prev)**0.5 * model_output\n x_prev = alpha_prod_t_prev**0.5 * pred_x0 + pred_dir\n return x_prev, pred_x0\n\n @torch.no_grad()\n def image2latent(self, image):\n DEVICE = torch.device(\n \"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n if type(image) is Image:\n image = np.array(image)\n image = torch.from_numpy(image).float() / 127.5 - 1\n image = image.permute(2, 0, 1).unsqueeze(0)\n # input image density range [-1, 1]\n latents = self.vae.encode(image.to(DEVICE))['latent_dist'].mean\n latents = latents * 0.18215\n return latents\n\n @torch.no_grad()\n def latent2image(self, latents, return_type='np'):\n latents = 1 / 0.18215 * latents.detach()\n image = self.vae.decode(latents)['sample']\n if return_type == 'np':\n image = (image / 2 + 0.5).clamp(0, 1)\n image = image.cpu().permute(0, 2, 3, 1).numpy()[0]\n image = (image * 255).astype(np.uint8)\n elif return_type == \"pt\":\n image = (image / 2 + 0.5).clamp(0, 1)\n\n return image\n\n def latent2image_grad(self, latents):\n latents = 1 / 0.18215 * latents\n image = self.vae.decode(latents)['sample']\n\n return image # range [-1, 1]\n\n @torch.no_grad()\n def cal_latent(self, num_inference_steps, guidance_scale, unconditioning, img_noise_0, img_noise_1, text_embeddings_0, text_embeddings_1, lora_0, lora_1, alpha, use_lora, fix_lora=None):\n # latents = torch.cos(alpha * torch.pi / 2) * img_noise_0 + \\\n # torch.sin(alpha * torch.pi / 2) * img_noise_1\n # latents = (1 - alpha) * img_noise_0 + alpha * img_noise_1\n # latents = latents / ((1 - alpha) ** 2 + alpha ** 2)\n latents = slerp(img_noise_0, img_noise_1, alpha, self.use_adain)\n text_embeddings = (1 - alpha) * 
text_embeddings_0 + \\\n alpha * text_embeddings_1\n\n self.scheduler.set_timesteps(num_inference_steps)\n if use_lora:\n if fix_lora is not None:\n self.unet = load_lora(self.unet, lora_0, lora_1, fix_lora)\n else:\n self.unet = load_lora(self.unet, lora_0, lora_1, alpha)\n\n for i, t in enumerate(tqdm.tqdm(self.scheduler.timesteps, desc=f\"DDIM Sampler, alpha={alpha}\")):\n\n if guidance_scale > 1.:\n model_inputs = torch.cat([latents] * 2)\n else:\n model_inputs = latents\n if unconditioning is not None and isinstance(unconditioning, list):\n _, text_embeddings = text_embeddings.chunk(2)\n text_embeddings = torch.cat(\n [unconditioning[i].expand(*text_embeddings.shape), text_embeddings])\n # predict the noise\n noise_pred = self.unet(\n model_inputs, t, encoder_hidden_states=text_embeddings).sample\n if guidance_scale > 1.0:\n noise_pred_uncon, noise_pred_con = noise_pred.chunk(\n 2, dim=0)\n noise_pred = noise_pred_uncon + guidance_scale * \\\n (noise_pred_con - noise_pred_uncon)\n # compute the previous noise sample x_t -> x_t-1\n latents = self.scheduler.step(\n noise_pred, t, latents, return_dict=False)[0]\n return latents\n\n @torch.no_grad()\n def get_text_embeddings(self, prompt, guidance_scale, neg_prompt, batch_size):\n DEVICE = torch.device(\n \"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n # text embeddings\n text_input = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n text_embeddings = self.text_encoder(text_input.input_ids.cuda())[0]\n\n if guidance_scale > 1.:\n if neg_prompt:\n uc_text = neg_prompt\n else:\n uc_text = \"\"\n unconditional_input = self.tokenizer(\n [uc_text] * batch_size,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n unconditional_embeddings = self.text_encoder(\n unconditional_input.input_ids.to(DEVICE))[0]\n text_embeddings = torch.cat(\n [unconditional_embeddings, text_embeddings], dim=0)\n\n return text_embeddings\n\n def __call__(\n self,\n img_0=None,\n img_1=None,\n img_path_0=None,\n img_path_1=None,\n prompt_0=\"\",\n prompt_1=\"\",\n save_lora_dir=\"./lora\",\n load_lora_path_0=None,\n load_lora_path_1=None,\n lora_steps=200,\n lora_lr=2e-4,\n lora_rank=16,\n batch_size=1,\n height=512,\n width=512,\n num_inference_steps=50,\n num_actual_inference_steps=None,\n guidance_scale=1,\n attn_beta=0,\n lamd=0.6,\n use_lora=True,\n use_adain=True,\n use_reschedule=True,\n output_path = \"./results\",\n num_frames=50,\n fix_lora=None,\n progress=tqdm,\n unconditioning=None,\n neg_prompt=None,\n save_intermediates=False,\n **kwds):\n\n # if isinstance(prompt, list):\n # batch_size = len(prompt)\n # elif isinstance(prompt, str):\n # if batch_size > 1:\n # prompt = [prompt] * batch_size\n self.scheduler.set_timesteps(num_inference_steps)\n self.use_lora = use_lora\n self.use_adain = use_adain\n self.use_reschedule = use_reschedule\n self.output_path = output_path\n \n if img_0 is None:\n img_0 = Image.open(img_path_0).convert(\"RGB\")\n # else:\n # img_0 = Image.fromarray(img_0).convert(\"RGB\")\n \n if img_1 is None:\n img_1 = Image.open(img_path_1).convert(\"RGB\")\n # else:\n # img_1 = Image.fromarray(img_1).convert(\"RGB\")\n if self.use_lora:\n print(\"Loading lora...\")\n if not load_lora_path_0:\n\n weight_name = f\"{output_path.split('/')[-1]}_lora_0.ckpt\"\n load_lora_path_0 = save_lora_dir + \"/\" + weight_name\n if not os.path.exists(load_lora_path_0):\n train_lora(img_0, prompt_0, save_lora_dir, None, self.tokenizer, self.text_encoder,\n self.vae, 
self.unet, self.scheduler, lora_steps, lora_lr, lora_rank, weight_name=weight_name)\n print(f\"Load from {load_lora_path_0}.\")\n if load_lora_path_0.endswith(\".safetensors\"):\n lora_0 = safetensors.torch.load_file(\n load_lora_path_0, device=\"cpu\")\n else:\n lora_0 = torch.load(load_lora_path_0, map_location=\"cpu\")\n\n if not load_lora_path_1:\n weight_name = f\"{output_path.split('/')[-1]}_lora_1.ckpt\"\n load_lora_path_1 = save_lora_dir + \"/\" + weight_name\n if not os.path.exists(load_lora_path_1):\n train_lora(img_1, prompt_1, save_lora_dir, None, self.tokenizer, self.text_encoder,\n self.vae, self.unet, self.scheduler, lora_steps, lora_lr, lora_rank, weight_name=weight_name)\n print(f\"Load from {load_lora_path_1}.\")\n if load_lora_path_1.endswith(\".safetensors\"):\n lora_1 = safetensors.torch.load_file(\n load_lora_path_1, device=\"cpu\")\n else:\n lora_1 = torch.load(load_lora_path_1, map_location=\"cpu\")\n\n text_embeddings_0 = self.get_text_embeddings(\n prompt_0, guidance_scale, neg_prompt, batch_size)\n text_embeddings_1 = self.get_text_embeddings(\n prompt_1, guidance_scale, neg_prompt, batch_size)\n img_0 = get_img(img_0)\n img_1 = get_img(img_1)\n if self.use_lora:\n self.unet = load_lora(self.unet, lora_0, lora_1, 0)\n img_noise_0 = self.ddim_inversion(\n self.image2latent(img_0), text_embeddings_0)\n if self.use_lora:\n self.unet = load_lora(self.unet, lora_0, lora_1, 1)\n img_noise_1 = self.ddim_inversion(\n self.image2latent(img_1), text_embeddings_1)\n\n print(\"latents shape: \", img_noise_0.shape)\n\n def morph(alpha_list, progress, desc):\n images = []\n if attn_beta is not None:\n\n self.unet = load_lora(self.unet, lora_0, lora_1, 0 if fix_lora is None else fix_lora)\n attn_processor_dict = {}\n for k in self.unet.attn_processors.keys():\n if do_replace_attn(k):\n attn_processor_dict[k] = StoreProcessor(self.unet.attn_processors[k],\n self.img0_dict, k)\n else:\n attn_processor_dict[k] = self.unet.attn_processors[k]\n self.unet.set_attn_processor(attn_processor_dict)\n\n latents = self.cal_latent(\n num_inference_steps,\n guidance_scale,\n unconditioning,\n img_noise_0,\n img_noise_1,\n text_embeddings_0,\n text_embeddings_1,\n lora_0,\n lora_1,\n alpha_list[0],\n False,\n fix_lora\n )\n first_image = self.latent2image(latents)\n first_image = Image.fromarray(first_image)\n if save_intermediates:\n first_image.save(f\"{self.output_path}/{0:02d}.png\")\n\n self.unet = load_lora(self.unet, lora_0, lora_1, 1 if fix_lora is None else fix_lora)\n attn_processor_dict = {}\n for k in self.unet.attn_processors.keys():\n if do_replace_attn(k):\n attn_processor_dict[k] = StoreProcessor(self.unet.attn_processors[k],\n self.img1_dict, k)\n else:\n attn_processor_dict[k] = self.unet.attn_processors[k]\n\n self.unet.set_attn_processor(attn_processor_dict)\n\n latents = self.cal_latent(\n num_inference_steps,\n guidance_scale,\n unconditioning,\n img_noise_0,\n img_noise_1,\n text_embeddings_0,\n text_embeddings_1,\n lora_0,\n lora_1,\n alpha_list[-1], \n False,\n fix_lora\n )\n last_image = self.latent2image(latents)\n last_image = Image.fromarray(last_image)\n if save_intermediates:\n last_image.save(\n f\"{self.output_path}/{num_frames - 1:02d}.png\")\n\n for i in progress.tqdm(range(1, num_frames - 1), desc=desc):\n alpha = alpha_list[i]\n self.unet = load_lora(self.unet, lora_0, lora_1, alpha if fix_lora is None else fix_lora)\n attn_processor_dict = {}\n for k in self.unet.attn_processors.keys():\n if do_replace_attn(k):\n attn_processor_dict[k] = LoadProcessor(\n 
self.unet.attn_processors[k], k, self.img0_dict, self.img1_dict, alpha, attn_beta, lamd)\n else:\n attn_processor_dict[k] = self.unet.attn_processors[k]\n\n self.unet.set_attn_processor(attn_processor_dict)\n\n latents = self.cal_latent(\n num_inference_steps,\n guidance_scale,\n unconditioning,\n img_noise_0,\n img_noise_1,\n text_embeddings_0,\n text_embeddings_1,\n lora_0,\n lora_1,\n alpha_list[i], \n False,\n fix_lora\n )\n image = self.latent2image(latents)\n image = Image.fromarray(image)\n if save_intermediates:\n image.save(f\"{self.output_path}/{i:02d}.png\")\n images.append(image)\n\n images = [first_image] + images + [last_image]\n\n else:\n for k, alpha in enumerate(alpha_list):\n\n latents = self.cal_latent(\n num_inference_steps,\n guidance_scale,\n unconditioning,\n img_noise_0,\n img_noise_1,\n text_embeddings_0,\n text_embeddings_1,\n lora_0,\n lora_1,\n alpha_list[k], \n self.use_lora,\n fix_lora\n )\n image = self.latent2image(latents)\n image = Image.fromarray(image)\n if save_intermediates:\n image.save(f\"{self.output_path}/{k:02d}.png\")\n images.append(image)\n\n return images\n\n with torch.no_grad():\n if self.use_reschedule:\n alpha_scheduler = AlphaScheduler()\n alpha_list = list(torch.linspace(0, 1, num_frames))\n images_pt = morph(alpha_list, progress, \"Sampling...\")\n images_pt = [transforms.ToTensor()(img).unsqueeze(0)\n for img in images_pt]\n alpha_scheduler.from_imgs(images_pt)\n alpha_list = alpha_scheduler.get_list()\n print(alpha_list)\n images = morph(alpha_list, progress, \"Reschedule...\")\n else:\n alpha_list = list(torch.linspace(0, 1, num_frames))\n print(alpha_list)\n images = morph(alpha_list, progress, \"Sampling...\")\n\n return images" }, { "identifier": "train_lora", "path": "utils/lora_utils.py", "snippet": "def train_lora(image, prompt, save_lora_dir, model_path=None, tokenizer=None, text_encoder=None, vae=None, unet=None, noise_scheduler=None, lora_steps=200, lora_lr=2e-4, lora_rank=16, weight_name=None, safe_serialization=False, progress=tqdm):\n # initialize accelerator\n accelerator = Accelerator(\n gradient_accumulation_steps=1,\n # mixed_precision='fp16'\n )\n set_seed(0)\n\n # Load the tokenizer\n if tokenizer is None:\n tokenizer = AutoTokenizer.from_pretrained(\n model_path,\n subfolder=\"tokenizer\",\n revision=None,\n use_fast=False,\n )\n # initialize the model\n if noise_scheduler is None:\n noise_scheduler = DDPMScheduler.from_pretrained(model_path, subfolder=\"scheduler\")\n if text_encoder is None:\n text_encoder_cls = import_model_class_from_model_name_or_path(model_path, revision=None)\n text_encoder = text_encoder_cls.from_pretrained(\n model_path, subfolder=\"text_encoder\", revision=None\n )\n if vae is None:\n vae = AutoencoderKL.from_pretrained(\n model_path, subfolder=\"vae\", revision=None\n )\n if unet is None:\n unet = UNet2DConditionModel.from_pretrained(\n model_path, subfolder=\"unet\", revision=None\n )\n\n # set device and dtype\n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n vae.requires_grad_(False)\n text_encoder.requires_grad_(False)\n unet.requires_grad_(False)\n\n unet.to(device)\n vae.to(device)\n text_encoder.to(device)\n\n # initialize UNet LoRA\n unet_lora_attn_procs = {}\n for name, attn_processor in unet.attn_processors.items():\n cross_attention_dim = None if name.endswith(\"attn1.processor\") else unet.config.cross_attention_dim\n if name.startswith(\"mid_block\"):\n hidden_size = unet.config.block_out_channels[-1]\n elif 
name.startswith(\"up_blocks\"):\n block_id = int(name[len(\"up_blocks.\")])\n hidden_size = list(reversed(unet.config.block_out_channels))[block_id]\n elif name.startswith(\"down_blocks\"):\n block_id = int(name[len(\"down_blocks.\")])\n hidden_size = unet.config.block_out_channels[block_id]\n else:\n raise NotImplementedError(\"name must start with up_blocks, mid_blocks, or down_blocks\")\n\n if isinstance(attn_processor, (AttnAddedKVProcessor, SlicedAttnAddedKVProcessor, AttnAddedKVProcessor2_0)):\n lora_attn_processor_class = LoRAAttnAddedKVProcessor\n else:\n lora_attn_processor_class = (\n LoRAAttnProcessor2_0 if hasattr(F, \"scaled_dot_product_attention\") else LoRAAttnProcessor\n )\n unet_lora_attn_procs[name] = lora_attn_processor_class(\n hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, rank=lora_rank\n )\n unet.set_attn_processor(unet_lora_attn_procs)\n unet_lora_layers = AttnProcsLayers(unet.attn_processors)\n\n # Optimizer creation\n params_to_optimize = (unet_lora_layers.parameters())\n optimizer = torch.optim.AdamW(\n params_to_optimize,\n lr=lora_lr,\n betas=(0.9, 0.999),\n weight_decay=1e-2,\n eps=1e-08,\n )\n\n lr_scheduler = get_scheduler(\n \"constant\",\n optimizer=optimizer,\n num_warmup_steps=0,\n num_training_steps=lora_steps,\n num_cycles=1,\n power=1.0,\n )\n\n # prepare accelerator\n unet_lora_layers = accelerator.prepare_model(unet_lora_layers)\n optimizer = accelerator.prepare_optimizer(optimizer)\n lr_scheduler = accelerator.prepare_scheduler(lr_scheduler)\n\n # initialize text embeddings\n with torch.no_grad():\n text_inputs = tokenize_prompt(tokenizer, prompt, tokenizer_max_length=None)\n text_embedding = encode_prompt(\n text_encoder,\n text_inputs.input_ids,\n text_inputs.attention_mask,\n text_encoder_use_attention_mask=False\n )\n\n if type(image) == np.ndarray:\n image = Image.fromarray(image)\n \n # initialize latent distribution\n image_transforms = transforms.Compose(\n [\n transforms.Resize(512, interpolation=transforms.InterpolationMode.BILINEAR),\n # transforms.RandomCrop(512),\n transforms.ToTensor(),\n transforms.Normalize([0.5], [0.5]),\n ]\n )\n\n image = image_transforms(image).to(device)\n image = image.unsqueeze(dim=0)\n \n latents_dist = vae.encode(image).latent_dist\n for _ in progress.tqdm(range(lora_steps), desc=\"Training LoRA...\"):\n unet.train()\n model_input = latents_dist.sample() * vae.config.scaling_factor\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(model_input)\n bsz, channels, height, width = model_input.shape\n # Sample a random timestep for each image\n timesteps = torch.randint(\n 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device\n )\n timesteps = timesteps.long()\n\n # Add noise to the model input according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps)\n\n # Predict the noise residual\n model_pred = unet(noisy_model_input, timesteps, text_embedding).sample\n\n # Get the target for loss depending on the prediction type\n if noise_scheduler.config.prediction_type == \"epsilon\":\n target = noise\n elif noise_scheduler.config.prediction_type == \"v_prediction\":\n target = noise_scheduler.get_velocity(model_input, noise, timesteps)\n else:\n raise ValueError(f\"Unknown prediction type {noise_scheduler.config.prediction_type}\")\n\n loss = F.mse_loss(model_pred.float(), target.float(), reduction=\"mean\")\n accelerator.backward(loss)\n 
optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n\n # save the trained lora\n # unet = unet.to(torch.float32)\n # vae = vae.to(torch.float32)\n # text_encoder = text_encoder.to(torch.float32)\n\n # unwrap_model is used to remove all special modules added when doing distributed training\n # so here, there is no need to call unwrap_model\n # unet_lora_layers = accelerator.unwrap_model(unet_lora_layers)\n LoraLoaderMixin.save_lora_weights(\n save_directory=save_lora_dir,\n unet_lora_layers=unet_lora_layers,\n text_encoder_lora_layers=None,\n weight_name=weight_name,\n safe_serialization=safe_serialization\n )" } ]
import os
import torch
import numpy as np
import cv2
import gradio as gr
from PIL import Image
from datetime import datetime
from model import DiffMorpherPipeline
from utils.lora_utils import train_lora
7,250
LENGTH=450

def train_lora_interface(
    image,
    prompt,
    model_path,
    output_path,
    lora_steps,
    lora_rank,
    lora_lr,
    num
):
    os.makedirs(output_path, exist_ok=True)
    train_lora(image, prompt, output_path, model_path,
               lora_steps=lora_steps, lora_lr=lora_lr, lora_rank=lora_rank,
               weight_name=f"lora_{num}.ckpt", progress=gr.Progress())
    return f"Train LoRA {'A' if num == 0 else 'B'} Done!"

def run_diffmorpher(
    image_0,
    image_1,
    prompt_0,
    prompt_1,
    model_path,
    lora_mode,
    lamb,
    use_adain,
    use_reschedule,
    num_frames,
    fps,
    save_inter,
    load_lora_path_0,
    load_lora_path_1,
    output_path
):
    run_id = datetime.now().strftime("%H%M") + "_" + datetime.now().strftime("%Y%m%d")
    os.makedirs(output_path, exist_ok=True)
LENGTH=450

def train_lora_interface(
    image,
    prompt,
    model_path,
    output_path,
    lora_steps,
    lora_rank,
    lora_lr,
    num
):
    os.makedirs(output_path, exist_ok=True)
    train_lora(image, prompt, output_path, model_path,
               lora_steps=lora_steps, lora_lr=lora_lr, lora_rank=lora_rank,
               weight_name=f"lora_{num}.ckpt", progress=gr.Progress())
    return f"Train LoRA {'A' if num == 0 else 'B'} Done!"

def run_diffmorpher(
    image_0,
    image_1,
    prompt_0,
    prompt_1,
    model_path,
    lora_mode,
    lamb,
    use_adain,
    use_reschedule,
    num_frames,
    fps,
    save_inter,
    load_lora_path_0,
    load_lora_path_1,
    output_path
):
    run_id = datetime.now().strftime("%H%M") + "_" + datetime.now().strftime("%Y%m%d")
    os.makedirs(output_path, exist_ok=True)
morpher_pipeline = DiffMorpherPipeline.from_pretrained(model_path, torch_dtype=torch.float32).to("cuda")
0
2023-12-11 15:19:07+00:00
12k
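The DiffMorpherPipeline context quoted above defines inv_step, a deterministic DDIM inversion update that re-derives a clean-latent estimate and pushes the sample one step toward noise. Below is a hedged, standalone restatement of that update; the function name ddim_inversion_step, the latent shape, and the example alpha values are assumptions for illustration, while the two formulas mirror the pred_x0 and x_next expressions in the snippet.

# Hedged sketch of a single deterministic DDIM inversion step (eta = 0).
import torch

def ddim_inversion_step(x_t, eps, alpha_t, alpha_next):
    """x_t: current latent; eps: model's noise prediction; alpha_*: cumulative alphas."""
    pred_x0 = (x_t - (1 - alpha_t) ** 0.5 * eps) / alpha_t ** 0.5           # clean-latent estimate
    x_next = alpha_next ** 0.5 * pred_x0 + (1 - alpha_next) ** 0.5 * eps    # one step toward noise
    return x_next, pred_x0

# Illustrative values only; real alphas come from the scheduler's alphas_cumprod table.
x_t = torch.randn(1, 4, 64, 64)
eps = torch.randn_like(x_t)
x_next, pred_x0 = ddim_inversion_step(x_t, eps, alpha_t=0.98, alpha_next=0.95)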
modelscope/richdreamer
threestudio/systems/base.py
[ { "identifier": "Exporter", "path": "threestudio/models/exporters/base.py", "snippet": "class Exporter(BaseObject):\n @dataclass\n class Config(BaseObject.Config):\n save_video: bool = False\n\n cfg: Config\n\n def configure(\n self,\n geometry: BaseImplicitGeometry,\n material: BaseMaterial,\n background: BaseBackground,\n ) -> None:\n @dataclass\n class SubModules:\n geometry: BaseImplicitGeometry\n material: BaseMaterial\n background: BaseBackground\n\n self.sub_modules = SubModules(geometry, material, background)\n\n @property\n def geometry(self) -> BaseImplicitGeometry:\n return self.sub_modules.geometry\n\n @property\n def material(self) -> BaseMaterial:\n return self.sub_modules.material\n\n @property\n def background(self) -> BaseBackground:\n return self.sub_modules.background\n\n def __call__(self, *args, **kwargs) -> List[ExporterOutput]:\n raise NotImplementedError" }, { "identifier": "ExporterOutput", "path": "threestudio/models/exporters/base.py", "snippet": "class ExporterOutput:\n save_name: str\n save_type: str\n params: Dict[str, Any]" }, { "identifier": "parse_optimizer", "path": "threestudio/systems/utils.py", "snippet": "def parse_optimizer(config, model):\n if hasattr(config, \"params\"):\n params = [\n {\"params\": get_parameters(model, name), \"name\": name, **args}\n for name, args in config.params.items()\n ]\n threestudio.debug(f\"Specify optimizer params: {config.params}\")\n else:\n params = model.parameters()\n if config.name in [\"FusedAdam\"]:\n import apex\n\n optim = getattr(apex.optimizers, config.name)(params, **config.args)\n elif config.name in [\"Adan\"]:\n from threestudio.systems import optimizers\n\n optim = getattr(optimizers, config.name)(params, **config.args)\n else:\n optim = getattr(torch.optim, config.name)(params, **config.args)\n return optim" }, { "identifier": "parse_scheduler", "path": "threestudio/systems/utils.py", "snippet": "def parse_scheduler(config, optimizer):\n interval = config.get(\"interval\", \"epoch\")\n assert interval in [\"epoch\", \"step\"]\n if config.name == \"SequentialLR\":\n scheduler = {\n \"scheduler\": lr_scheduler.SequentialLR(\n optimizer,\n [\n parse_scheduler(conf, optimizer)[\"scheduler\"]\n for conf in config.schedulers\n ],\n milestones=config.milestones,\n ),\n \"interval\": interval,\n }\n elif config.name == \"ChainedScheduler\":\n scheduler = {\n \"scheduler\": lr_scheduler.ChainedScheduler(\n [\n parse_scheduler(conf, optimizer)[\"scheduler\"]\n for conf in config.schedulers\n ]\n ),\n \"interval\": interval,\n }\n else:\n scheduler = {\n \"scheduler\": get_scheduler(config.name)(optimizer, **config.args),\n \"interval\": interval,\n }\n return scheduler" }, { "identifier": "Updateable", "path": "threestudio/utils/base.py", "snippet": "class Updateable:\n def do_update_step(\n self, epoch: int, global_step: int, on_load_weights: bool = False\n ):\n for attr in self.__dir__():\n if attr.startswith(\"_\"):\n continue\n try:\n module = getattr(self, attr)\n except:\n continue # ignore attributes like property, which can't be retrived using getattr?\n if isinstance(module, Updateable):\n module.do_update_step(\n epoch, global_step, on_load_weights=on_load_weights\n )\n self.update_step(epoch, global_step, on_load_weights=on_load_weights)\n\n def do_update_step_end(self, epoch: int, global_step: int):\n for attr in self.__dir__():\n if attr.startswith(\"_\"):\n continue\n try:\n module = getattr(self, attr)\n except:\n continue # ignore attributes like property, which can't be retrived using 
getattr?\n if isinstance(module, Updateable):\n module.do_update_step_end(epoch, global_step)\n self.update_step_end(epoch, global_step)\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n # override this method to implement custom update logic\n # if on_load_weights is True, you should be careful doing things related to model evaluations,\n # as the models and tensors are not guarenteed to be on the same device\n pass\n\n def update_step_end(self, epoch: int, global_step: int):\n pass" }, { "identifier": "update_end_if_possible", "path": "threestudio/utils/base.py", "snippet": "def update_end_if_possible(module: Any, epoch: int, global_step: int) -> None:\n if isinstance(module, Updateable):\n module.do_update_step_end(epoch, global_step)" }, { "identifier": "update_if_possible", "path": "threestudio/utils/base.py", "snippet": "def update_if_possible(module: Any, epoch: int, global_step: int) -> None:\n if isinstance(module, Updateable):\n module.do_update_step(epoch, global_step)" }, { "identifier": "parse_structured", "path": "threestudio/utils/config.py", "snippet": "def parse_structured(fields: Any, cfg: Optional[Union[dict, DictConfig]] = None) -> Any:\n scfg = OmegaConf.structured(fields(**cfg))\n return scfg" }, { "identifier": "C", "path": "threestudio/utils/misc.py", "snippet": "def C(value: Any, epoch: int, global_step: int) -> float:\n if isinstance(value, int) or isinstance(value, float):\n pass\n else:\n value = config_to_primitive(value)\n if not isinstance(value, list):\n raise TypeError(\"Scalar specification only supports list, got\", type(value))\n if len(value) == 3:\n value = [0] + value\n assert len(value) == 4\n start_step, start_value, end_value, end_step = value\n if isinstance(end_step, int):\n current_step = global_step\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n elif isinstance(end_step, float):\n current_step = epoch\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n return value" }, { "identifier": "cleanup", "path": "threestudio/utils/misc.py", "snippet": "def cleanup():\n gc.collect()\n torch.cuda.empty_cache()\n tcnn.free_temporary_memory()" }, { "identifier": "get_device", "path": "threestudio/utils/misc.py", "snippet": "def get_device():\n return torch.device(f\"cuda:{get_rank()}\")" }, { "identifier": "load_module_weights", "path": "threestudio/utils/misc.py", "snippet": "def load_module_weights(\n path, module_name=None, ignore_modules=None, map_location=None\n) -> Tuple[dict, int, int]:\n if module_name is not None and ignore_modules is not None:\n raise ValueError(\"module_name and ignore_modules cannot be both set\")\n if map_location is None:\n map_location = get_device()\n\n ckpt = torch.load(path, map_location=map_location)\n state_dict = ckpt[\"state_dict\"]\n state_dict_to_load = state_dict\n\n if ignore_modules is not None:\n state_dict_to_load = {}\n for k, v in state_dict.items():\n ignore = any(\n [k.startswith(ignore_module + \".\") for ignore_module in ignore_modules]\n )\n if ignore:\n continue\n state_dict_to_load[k] = v\n\n if module_name is not None:\n state_dict_to_load = {}\n for k, v in state_dict.items():\n m = re.match(rf\"^{module_name}\\.(.*)$\", k)\n if m is None:\n continue\n state_dict_to_load[m.group(1)] = v\n\n return state_dict_to_load, ckpt[\"epoch\"], ckpt[\"global_step\"]" }, { "identifier": "SaverMixin", "path": 
"threestudio/utils/saving.py", "snippet": "class SaverMixin:\n _save_dir: Optional[str] = None\n _exp_root_save_dir: Optional[str] = None\n _wandb_logger: Optional[WandbLogger] = None\n\n def set_save_dir(self, save_dir: str):\n self._save_dir = save_dir\n\n def set_exp_root_dir(self, exp_root_dir: str):\n self._exp_root_save_dir = exp_root_dir\n\n def get_save_dir(self):\n if self._save_dir is None:\n raise ValueError(\"Save dir is not set\")\n return self._save_dir\n\n def get_exp_root_dir(self):\n if self._exp_root_save_dir is None:\n raise ValueError(\"exp root save dir dir is not set\")\n return self._exp_root_save_dir\n\n\n def convert_data(self, data):\n if data is None:\n return None\n elif isinstance(data, np.ndarray):\n return data\n elif isinstance(data, torch.Tensor):\n return data.detach().cpu().numpy()\n elif isinstance(data, list):\n return [self.convert_data(d) for d in data]\n elif isinstance(data, dict):\n return {k: self.convert_data(v) for k, v in data.items()}\n else:\n raise TypeError(\n \"Data must be in type numpy.ndarray, torch.Tensor, list or dict, getting\",\n type(data),\n )\n\n def get_save_path(self, filename):\n save_path = os.path.join(self.get_save_dir(), filename)\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n return save_path\n\n def create_loggers(self, cfg_loggers: DictConfig) -> None:\n if \"wandb\" in cfg_loggers.keys() and cfg_loggers.wandb.enable:\n self._wandb_logger = WandbLogger(\n project=cfg_loggers.wandb.project, name=cfg_loggers.wandb.name\n )\n\n def get_loggers(self) -> List:\n if self._wandb_logger:\n return [self._wandb_logger]\n else:\n return []\n\n DEFAULT_RGB_KWARGS = {\"data_format\": \"HWC\", \"data_range\": (0, 1)}\n DEFAULT_UV_KWARGS = {\n \"data_format\": \"HWC\",\n \"data_range\": (0, 1),\n \"cmap\": \"checkerboard\",\n }\n DEFAULT_GRAYSCALE_KWARGS = {\"data_range\": None, \"cmap\": \"jet\"}\n DEFAULT_GRID_KWARGS = {\"align\": \"max\"}\n\n def get_rgb_image_(self, img, data_format, data_range, rgba=False):\n img = self.convert_data(img)\n assert data_format in [\"CHW\", \"HWC\"]\n if data_format == \"CHW\":\n img = img.transpose(1, 2, 0)\n if img.dtype != np.uint8:\n img = img.clip(min=data_range[0], max=data_range[1])\n img = (\n (img - data_range[0]) / (data_range[1] - data_range[0]) * 255.0\n ).astype(np.uint8)\n nc = 4 if rgba else 3\n imgs = [img[..., start : start + nc] for start in range(0, img.shape[-1], nc)]\n imgs = [\n img_\n if img_.shape[-1] == nc\n else np.concatenate(\n [\n img_,\n np.zeros(\n (img_.shape[0], img_.shape[1], nc - img_.shape[2]),\n dtype=img_.dtype,\n ),\n ],\n axis=-1,\n )\n for img_ in imgs\n ]\n img = np.concatenate(imgs, axis=1)\n if rgba:\n img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGRA)\n else:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img\n\n def _save_rgb_image(\n self,\n filename,\n img,\n data_format,\n data_range,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ):\n img = self.get_rgb_image_(img, data_format, data_range)\n cv2.imwrite(filename, img)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Image(self.get_save_path(filename)),\n \"trainer/global_step\": step,\n }\n )\n\n def save_rgb_image(\n self,\n filename,\n img,\n data_format=DEFAULT_RGB_KWARGS[\"data_format\"],\n data_range=DEFAULT_RGB_KWARGS[\"data_range\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n save_path = self.get_save_path(filename)\n self._save_rgb_image(save_path, img, data_format, data_range, name, step)\n return save_path\n\n def 
get_uv_image_(self, img, data_format, data_range, cmap):\n img = self.convert_data(img)\n assert data_format in [\"CHW\", \"HWC\"]\n if data_format == \"CHW\":\n img = img.transpose(1, 2, 0)\n img = img.clip(min=data_range[0], max=data_range[1])\n img = (img - data_range[0]) / (data_range[1] - data_range[0])\n assert cmap in [\"checkerboard\", \"color\"]\n if cmap == \"checkerboard\":\n n_grid = 64\n mask = (img * n_grid).astype(int)\n mask = (mask[..., 0] + mask[..., 1]) % 2 == 0\n img = np.ones((img.shape[0], img.shape[1], 3), dtype=np.uint8) * 255\n img[mask] = np.array([255, 0, 255], dtype=np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n elif cmap == \"color\":\n img_ = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n img_[..., 0] = (img[..., 0] * 255).astype(np.uint8)\n img_[..., 1] = (img[..., 1] * 255).astype(np.uint8)\n img_ = cv2.cvtColor(img_, cv2.COLOR_RGB2BGR)\n img = img_\n return img\n\n def save_uv_image(\n self,\n filename,\n img,\n data_format=DEFAULT_UV_KWARGS[\"data_format\"],\n data_range=DEFAULT_UV_KWARGS[\"data_range\"],\n cmap=DEFAULT_UV_KWARGS[\"cmap\"],\n ) -> str:\n save_path = self.get_save_path(filename)\n img = self.get_uv_image_(img, data_format, data_range, cmap)\n cv2.imwrite(save_path, img)\n return save_path\n\n def get_grayscale_image_(self, img, data_range, cmap):\n img = self.convert_data(img)\n img = np.nan_to_num(img)\n if data_range is None:\n img = (img - img.min()) / (img.max() - img.min())\n else:\n img = img.clip(data_range[0], data_range[1])\n img = (img - data_range[0]) / (data_range[1] - data_range[0])\n assert cmap in [None, \"jet\", \"magma\", \"spectral\"]\n if cmap == None:\n img = (img * 255.0).astype(np.uint8)\n img = np.repeat(img[..., None], 3, axis=2)\n elif cmap == \"jet\":\n img = (img * 255.0).astype(np.uint8)\n img = cv2.applyColorMap(img, cv2.COLORMAP_JET)\n elif cmap == \"magma\":\n img = 1.0 - img\n base = cm.get_cmap(\"magma\")\n num_bins = 256\n colormap = LinearSegmentedColormap.from_list(\n f\"{base.name}{num_bins}\", base(np.linspace(0, 1, num_bins)), num_bins\n )(np.linspace(0, 1, num_bins))[:, :3]\n a = np.floor(img * 255.0)\n b = (a + 1).clip(max=255.0)\n f = img * 255.0 - a\n a = a.astype(np.uint16).clip(0, 255)\n b = b.astype(np.uint16).clip(0, 255)\n img = colormap[a] + (colormap[b] - colormap[a]) * f[..., None]\n img = (img * 255.0).astype(np.uint8)\n elif cmap == \"spectral\":\n colormap = plt.get_cmap(\"Spectral\")\n\n def blend_rgba(image):\n image = image[..., :3] * image[..., -1:] + (\n 1.0 - image[..., -1:]\n ) # blend A to RGB\n return image\n\n img = colormap(img)\n img = blend_rgba(img)\n img = (img * 255).astype(np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img\n\n def _save_grayscale_image(\n self,\n filename,\n img,\n data_range,\n cmap,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ):\n img = self.get_grayscale_image_(img, data_range, cmap)\n cv2.imwrite(filename, img)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Image(self.get_save_path(filename)),\n \"trainer/global_step\": step,\n }\n )\n\n def save_grayscale_image(\n self,\n filename,\n img,\n data_range=DEFAULT_GRAYSCALE_KWARGS[\"data_range\"],\n cmap=DEFAULT_GRAYSCALE_KWARGS[\"cmap\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n save_path = self.get_save_path(filename)\n self._save_grayscale_image(save_path, img, data_range, cmap, name, step)\n return save_path\n\n def get_image_grid_(self, imgs, align):\n if isinstance(imgs[0], list):\n return 
np.concatenate(\n [self.get_image_grid_(row, align) for row in imgs], axis=0\n )\n cols = []\n for col in imgs:\n assert col[\"type\"] in [\"rgb\", \"uv\", \"grayscale\"]\n if col[\"type\"] == \"rgb\":\n rgb_kwargs = self.DEFAULT_RGB_KWARGS.copy()\n rgb_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_rgb_image_(col[\"img\"], **rgb_kwargs))\n elif col[\"type\"] == \"uv\":\n uv_kwargs = self.DEFAULT_UV_KWARGS.copy()\n uv_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_uv_image_(col[\"img\"], **uv_kwargs))\n elif col[\"type\"] == \"grayscale\":\n grayscale_kwargs = self.DEFAULT_GRAYSCALE_KWARGS.copy()\n grayscale_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_grayscale_image_(col[\"img\"], **grayscale_kwargs))\n\n if align == \"max\":\n h = max([col.shape[0] for col in cols])\n w = max([col.shape[1] for col in cols])\n elif align == \"min\":\n h = min([col.shape[0] for col in cols])\n w = min([col.shape[1] for col in cols])\n elif isinstance(align, int):\n h = align\n w = align\n elif (\n isinstance(align, tuple)\n and isinstance(align[0], int)\n and isinstance(align[1], int)\n ):\n h, w = align\n else:\n raise ValueError(\n f\"Unsupported image grid align: {align}, should be min, max, int or (int, int)\"\n )\n\n for i in range(len(cols)):\n if cols[i].shape[0] != h or cols[i].shape[1] != w:\n cols[i] = cv2.resize(cols[i], (w, h), interpolation=cv2.INTER_LINEAR)\n return np.concatenate(cols, axis=1)\n\n def save_image_grid(\n self,\n filename,\n imgs,\n align=DEFAULT_GRID_KWARGS[\"align\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n texts: Optional[List[float]] = None,\n ):\n save_path = self.get_save_path(filename)\n img = self.get_image_grid_(imgs, align=align)\n\n if texts is not None:\n img = Image.fromarray(img)\n draw = ImageDraw.Draw(img)\n black, white = (0, 0, 0), (255, 255, 255)\n for i, text in enumerate(texts):\n draw.text((2, (img.size[1] // len(texts)) * i + 1), f\"{text}\", white)\n draw.text((0, (img.size[1] // len(texts)) * i + 1), f\"{text}\", white)\n draw.text((2, (img.size[1] // len(texts)) * i - 1), f\"{text}\", white)\n draw.text((0, (img.size[1] // len(texts)) * i - 1), f\"{text}\", white)\n draw.text((1, (img.size[1] // len(texts)) * i), f\"{text}\", black)\n img = np.asarray(img)\n\n cv2.imwrite(save_path, img)\n if name and self._wandb_logger:\n wandb.log({name: wandb.Image(save_path), \"trainer/global_step\": step})\n\n save_vis_path = os.path.join(self.get_exp_root_dir(), \"vis.jpg\")\n os.makedirs(os.path.dirname(save_vis_path), exist_ok=True)\n cv2.imwrite(save_vis_path, img)\n return save_path\n\n def save_image(self, filename, img) -> str:\n save_path = self.get_save_path(filename)\n img = self.convert_data(img)\n assert img.dtype == np.uint8 or img.dtype == np.uint16\n if img.ndim == 3 and img.shape[-1] == 3:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n elif img.ndim == 3 and img.shape[-1] == 4:\n img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGRA)\n cv2.imwrite(save_path, img)\n return save_path\n\n def save_img_ffmpeg(self, save_path, fps=30, *img_handler_list):\n # fix the bug the string has \"'s\"\n command = 'ffmpeg -r {} -pattern_type glob -i \"{}\" -vcodec libx264 -crf 18 -vf \"pad=ceil(iw/2)*2:ceil(ih/2)*2\" -pix_fmt yuv420p {}'\n ffmpeg_list = []\n\n for handler_idx, img_handler in enumerate(img_handler_list):\n img_dir = os.path.join(self.get_save_dir(), img_handler, \"*.png\")\n\n tmp_file_name = os.path.join(\n self.get_save_dir(), \"tmp_{:04d}.mp4\".format(handler_idx)\n ).replace(\"'\", \"\\\\'\")\n img_dir = 
img_dir.replace(\"'\", \"\\\\'\")\n cmd = command.format(fps, img_dir, tmp_file_name)\n\n os.system(\"rm -rf {}\".format(tmp_file_name))\n\n os.system(cmd)\n ffmpeg_list.append(tmp_file_name)\n\n save_path = os.path.join(self.get_save_dir(), save_path).replace(\"'\", \"\\\\'\")\n\n os.system(\"rm -rf {}\".format(save_path))\n cmd = 'ffmpeg -i {} -i {} -filter_complex \"[0:v][1:v]concat=n=2:v=1:a=0\" -c:v libx264 -crf 23 -preset veryfast -c:a aac -b:a 128k {}'.format(\n ffmpeg_list[0], ffmpeg_list[1], save_path\n )\n\n os.system(cmd)\n\n def save_cubemap(self, filename, img, data_range=(0, 1), rgba=False) -> str:\n save_path = self.get_save_path(filename)\n img = self.convert_data(img)\n assert img.ndim == 4 and img.shape[0] == 6 and img.shape[1] == img.shape[2]\n\n imgs_full = []\n for start in range(0, img.shape[-1], 3):\n img_ = img[..., start : start + 3]\n img_ = np.stack(\n [\n self.get_rgb_image_(img_[i], \"HWC\", data_range, rgba=rgba)\n for i in range(img_.shape[0])\n ],\n axis=0,\n )\n size = img_.shape[1]\n placeholder = np.zeros((size, size, 3), dtype=np.float32)\n img_full = np.concatenate(\n [\n np.concatenate(\n [placeholder, img_[2], placeholder, placeholder], axis=1\n ),\n np.concatenate([img_[1], img_[4], img_[0], img_[5]], axis=1),\n np.concatenate(\n [placeholder, img_[3], placeholder, placeholder], axis=1\n ),\n ],\n axis=0,\n )\n imgs_full.append(img_full)\n\n imgs_full = np.concatenate(imgs_full, axis=1)\n cv2.imwrite(save_path, imgs_full)\n return save_path\n\n def save_data(self, filename, data) -> str:\n data = self.convert_data(data)\n if isinstance(data, dict):\n if not filename.endswith(\".npz\"):\n filename += \".npz\"\n save_path = self.get_save_path(filename)\n np.savez(save_path, **data)\n else:\n if not filename.endswith(\".npy\"):\n filename += \".npy\"\n save_path = self.get_save_path(filename)\n np.save(save_path, data)\n return save_path\n\n def save_state_dict(self, filename, data) -> str:\n save_path = self.get_save_path(filename)\n torch.save(data, save_path)\n return save_path\n\n def save_img_sequence(\n self,\n filename,\n img_dir,\n matcher,\n save_format=\"mp4\",\n fps=30,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n assert save_format in [\"gif\", \"mp4\"]\n if not filename.endswith(save_format):\n filename += f\".{save_format}\"\n save_path = self.get_save_path(filename)\n matcher = re.compile(matcher)\n img_dir = os.path.join(self.get_save_dir(), img_dir)\n imgs = []\n for f in os.listdir(img_dir):\n if matcher.search(f):\n imgs.append(f)\n imgs = sorted(imgs, key=lambda f: int(matcher.search(f).groups()[0]))\n imgs = [cv2.imread(os.path.join(img_dir, f)) for f in imgs]\n\n if save_format == \"gif\":\n imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in imgs]\n imageio.mimsave(save_path, imgs, fps=fps, palettesize=256)\n elif save_format == \"mp4\":\n imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in imgs]\n imageio.mimsave(save_path, imgs, fps=fps)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Video(save_path, format=\"mp4\"),\n \"trainer/global_step\": step,\n }\n )\n return save_path\n\n def save_mesh(self, filename, v_pos, t_pos_idx, v_tex=None, t_tex_idx=None) -> str:\n save_path = self.get_save_path(filename)\n v_pos = self.convert_data(v_pos)\n t_pos_idx = self.convert_data(t_pos_idx)\n mesh = trimesh.Trimesh(vertices=v_pos, faces=t_pos_idx)\n mesh.export(save_path)\n return save_path\n\n def save_obj(\n self,\n filename: str,\n mesh: Mesh,\n save_mat: bool = False,\n save_normal: bool 
= False,\n save_uv: bool = False,\n save_vertex_color: bool = False,\n map_Kd: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Ks: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Bump: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Pm: Optional[Float[Tensor, \"H W 1\"]] = None,\n map_Pr: Optional[Float[Tensor, \"H W 1\"]] = None,\n map_format: str = \"jpg\",\n ) -> List[str]:\n save_paths: List[str] = []\n if not filename.endswith(\".obj\"):\n filename += \".obj\"\n v_pos, t_pos_idx = self.convert_data(mesh.v_pos), self.convert_data(\n mesh.t_pos_idx\n )\n v_nrm, v_tex, t_tex_idx, v_rgb = None, None, None, None\n if save_normal:\n v_nrm = self.convert_data(mesh.v_nrm)\n if save_uv:\n v_tex, t_tex_idx = self.convert_data(mesh.v_tex), self.convert_data(\n mesh.t_tex_idx\n )\n if save_vertex_color:\n v_rgb = self.convert_data(mesh.v_rgb)\n matname, mtllib = None, None\n if save_mat:\n matname = \"default\"\n mtl_filename = filename.replace(\".obj\", \".mtl\")\n mtllib = os.path.basename(mtl_filename)\n mtl_save_paths = self._save_mtl(\n mtl_filename,\n matname,\n map_Kd=self.convert_data(map_Kd),\n map_Ks=self.convert_data(map_Ks),\n map_Bump=self.convert_data(map_Bump),\n map_Pm=self.convert_data(map_Pm),\n map_Pr=self.convert_data(map_Pr),\n map_format=map_format,\n )\n save_paths += mtl_save_paths\n obj_save_path = self._save_obj(\n filename,\n v_pos,\n t_pos_idx,\n v_nrm=v_nrm,\n v_tex=v_tex,\n t_tex_idx=t_tex_idx,\n v_rgb=v_rgb,\n matname=matname,\n mtllib=mtllib,\n )\n save_fix_dir = os.path.dirname(obj_save_path) + \"_fix\"\n save_fix_obj_path = os.path.join(save_fix_dir, os.path.basename(obj_save_path))\n os.makedirs(save_fix_dir, exist_ok=True)\n fix_mesh(obj_save_path, save_fix_obj_path)\n save_paths.append(obj_save_path)\n return save_paths\n\n def _save_obj(\n self,\n filename,\n v_pos,\n t_pos_idx,\n v_nrm=None,\n v_tex=None,\n t_tex_idx=None,\n v_rgb=None,\n matname=None,\n mtllib=None,\n ) -> str:\n obj_str = \"\"\n if matname is not None:\n obj_str += f\"mtllib {mtllib}\\n\"\n obj_str += f\"g object\\n\"\n obj_str += f\"usemtl {matname}\\n\"\n for i in range(len(v_pos)):\n obj_str += f\"v {v_pos[i][0]} {v_pos[i][1]} {v_pos[i][2]}\"\n if v_rgb is not None:\n obj_str += f\" {v_rgb[i][0]} {v_rgb[i][1]} {v_rgb[i][2]}\"\n obj_str += \"\\n\"\n if v_nrm is not None:\n for v in v_nrm:\n obj_str += f\"vn {v[0]} {v[1]} {v[2]}\\n\"\n if v_tex is not None:\n for v in v_tex:\n obj_str += f\"vt {v[0]} {1.0 - v[1]}\\n\"\n\n for i in range(len(t_pos_idx)):\n obj_str += \"f\"\n for j in range(3):\n obj_str += f\" {t_pos_idx[i][j] + 1}/\"\n if v_tex is not None:\n obj_str += f\"{t_tex_idx[i][j] + 1}\"\n obj_str += \"/\"\n if v_nrm is not None:\n obj_str += f\"{t_pos_idx[i][j] + 1}\"\n obj_str += \"\\n\"\n\n save_path = self.get_save_path(filename)\n with open(save_path, \"w\") as f:\n f.write(obj_str)\n return save_path\n\n def _save_mtl(\n self,\n filename,\n matname,\n Ka=(0.0, 0.0, 0.0),\n Kd=(1.0, 1.0, 1.0),\n Ks=(0.0, 0.0, 0.0),\n map_Kd=None,\n map_Ks=None,\n map_Bump=None,\n map_Pm=None,\n map_Pr=None,\n map_format=\"jpg\",\n step: Optional[int] = None,\n ) -> List[str]:\n mtl_save_path = self.get_save_path(filename)\n save_paths = [mtl_save_path]\n mtl_str = f\"newmtl {matname}\\n\"\n mtl_str += f\"Ka {Ka[0]} {Ka[1]} {Ka[2]}\\n\"\n if map_Kd is not None:\n map_Kd_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_kd.{map_format}\"\n )\n mtl_str += f\"map_Kd texture_kd.{map_format}\\n\"\n self._save_rgb_image(\n map_Kd_save_path,\n map_Kd,\n data_format=\"HWC\",\n 
data_range=(0, 1),\n name=f\"{matname}_Kd\",\n step=step,\n )\n save_paths.append(map_Kd_save_path)\n else:\n mtl_str += f\"Kd {Kd[0]} {Kd[1]} {Kd[2]}\\n\"\n if map_Ks is not None:\n map_Ks_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_ks.{map_format}\"\n )\n mtl_str += f\"map_Ks texture_ks.{map_format}\\n\"\n self._save_rgb_image(\n map_Ks_save_path,\n map_Ks,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Ks\",\n step=step,\n )\n save_paths.append(map_Ks_save_path)\n else:\n mtl_str += f\"Ks {Ks[0]} {Ks[1]} {Ks[2]}\\n\"\n if map_Bump is not None:\n map_Bump_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_nrm.{map_format}\"\n )\n mtl_str += f\"map_Bump texture_nrm.{map_format}\\n\"\n self._save_rgb_image(\n map_Bump_save_path,\n map_Bump,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Bump\",\n step=step,\n )\n save_paths.append(map_Bump_save_path)\n if map_Pm is not None:\n map_Pm_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_metallic.{map_format}\"\n )\n mtl_str += f\"map_Pm texture_metallic.{map_format}\\n\"\n self._save_grayscale_image(\n map_Pm_save_path,\n map_Pm,\n data_range=(0, 1),\n cmap=None,\n name=f\"{matname}_refl\",\n step=step,\n )\n save_paths.append(map_Pm_save_path)\n if map_Pr is not None:\n map_Pr_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_roughness.{map_format}\"\n )\n mtl_str += f\"map_Pr texture_roughness.{map_format}\\n\"\n self._save_grayscale_image(\n map_Pr_save_path,\n map_Pr,\n data_range=(0, 1),\n cmap=None,\n name=f\"{matname}_Ns\",\n step=step,\n )\n save_paths.append(map_Pr_save_path)\n with open(self.get_save_path(filename), \"w\") as f:\n f.write(mtl_str)\n return save_paths\n\n def save_file(self, filename, src_path) -> str:\n save_path = self.get_save_path(filename)\n shutil.copyfile(src_path, save_path)\n return save_path\n\n def save_json(self, filename, payload) -> str:\n save_path = self.get_save_path(filename)\n with open(save_path, \"w\") as f:\n f.write(json.dumps(payload))\n return save_path" } ]
import os
import pytorch_lightning as pl
import torch.nn.functional as F
import threestudio
from dataclasses import dataclass, field
from threestudio.models.exporters.base import Exporter, ExporterOutput
from threestudio.systems.utils import parse_optimizer, parse_scheduler
from threestudio.utils.base import (Updateable, update_end_if_possible, update_if_possible,)
from threestudio.utils.config import parse_structured
from threestudio.utils.misc import C, cleanup, get_device, load_module_weights
from threestudio.utils.saving import SaverMixin
from threestudio.utils.typing import *
from threestudio.utils.config import load_config, parse_structured
9,625
class BaseSystem(pl.LightningModule, Updateable, SaverMixin):
    @dataclass
    class Config:
        loggers: dict = field(default_factory=dict)
        loss: dict = field(default_factory=dict)
        optimizer: dict = field(default_factory=dict)
        scheduler: Optional[dict] = None
        weights: Optional[str] = None
        weights_ignore_modules: Optional[List[str]] = None
        cleanup_after_validation_step: bool = False
        cleanup_after_test_step: bool = False

    cfg: Config

    def __init__(self, cfg, resumed=False) -> None:
        super().__init__()
        self.cfg = parse_structured(self.Config, cfg)
        self._save_dir: Optional[str] = None
        self._resumed: bool = resumed
        self._resumed_eval: bool = False
        self._resumed_eval_status: dict = {"global_step": 0, "current_epoch": 0}
        if "loggers" in cfg:
            self.create_loggers(cfg.loggers)
        self.configure()
        if self.cfg.weights is not None:
            self.load_weights(self.cfg.weights, self.cfg.weights_ignore_modules)
        self.post_configure()

    def load_weights(self, weights: str, ignore_modules: Optional[List[str]] = None):
        state_dict, epoch, global_step = load_module_weights(
            weights, ignore_modules=ignore_modules, map_location="cpu"
        )
        self.load_state_dict(state_dict, strict=False)
        # restore step-dependent states
        self.do_update_step(epoch, global_step, on_load_weights=True)

    def set_resume_status(self, current_epoch: int, global_step: int):
        # restore correct epoch and global step in eval
        self._resumed_eval = True
        self._resumed_eval_status["current_epoch"] = current_epoch
        self._resumed_eval_status["global_step"] = global_step

    @property
    def resumed(self):
        # whether from resumed checkpoint
        return self._resumed

    @property
    def true_global_step(self):
        if self._resumed_eval:
            return self._resumed_eval_status["global_step"]
        else:
            return self.global_step

    @property
    def true_current_epoch(self):
        if self._resumed_eval:
            return self._resumed_eval_status["current_epoch"]
        else:
            return self.current_epoch

    def configure(self) -> None:
        pass

    def post_configure(self) -> None:
        """
        executed after weights are loaded
        """
        pass

    def C(self, value: Any) -> float:
        return C(value, self.true_current_epoch, self.true_global_step)

    def configure_optimizers(self):
class BaseSystem(pl.LightningModule, Updateable, SaverMixin):
    @dataclass
    class Config:
        loggers: dict = field(default_factory=dict)
        loss: dict = field(default_factory=dict)
        optimizer: dict = field(default_factory=dict)
        scheduler: Optional[dict] = None
        weights: Optional[str] = None
        weights_ignore_modules: Optional[List[str]] = None
        cleanup_after_validation_step: bool = False
        cleanup_after_test_step: bool = False

    cfg: Config

    def __init__(self, cfg, resumed=False) -> None:
        super().__init__()
        self.cfg = parse_structured(self.Config, cfg)
        self._save_dir: Optional[str] = None
        self._resumed: bool = resumed
        self._resumed_eval: bool = False
        self._resumed_eval_status: dict = {"global_step": 0, "current_epoch": 0}
        if "loggers" in cfg:
            self.create_loggers(cfg.loggers)
        self.configure()
        if self.cfg.weights is not None:
            self.load_weights(self.cfg.weights, self.cfg.weights_ignore_modules)
        self.post_configure()

    def load_weights(self, weights: str, ignore_modules: Optional[List[str]] = None):
        state_dict, epoch, global_step = load_module_weights(
            weights, ignore_modules=ignore_modules, map_location="cpu"
        )
        self.load_state_dict(state_dict, strict=False)
        # restore step-dependent states
        self.do_update_step(epoch, global_step, on_load_weights=True)

    def set_resume_status(self, current_epoch: int, global_step: int):
        # restore correct epoch and global step in eval
        self._resumed_eval = True
        self._resumed_eval_status["current_epoch"] = current_epoch
        self._resumed_eval_status["global_step"] = global_step

    @property
    def resumed(self):
        # whether from resumed checkpoint
        return self._resumed

    @property
    def true_global_step(self):
        if self._resumed_eval:
            return self._resumed_eval_status["global_step"]
        else:
            return self.global_step

    @property
    def true_current_epoch(self):
        if self._resumed_eval:
            return self._resumed_eval_status["current_epoch"]
        else:
            return self.current_epoch

    def configure(self) -> None:
        pass

    def post_configure(self) -> None:
        """
        executed after weights are loaded
        """
        pass

    def C(self, value: Any) -> float:
        return C(value, self.true_current_epoch, self.true_global_step)

    def configure_optimizers(self):
optim = parse_optimizer(self.cfg.optimizer, self)
2
2023-12-06 07:53:11+00:00
12k
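The context above quotes threestudio's C() helper, which turns a scalar specification [start_step, start_value, end_value, end_step] into a value that ramps linearly with training progress. The sketch below restates that rule in isolation; the name scheduled_value is hypothetical, and it deliberately omits the 3-element and epoch-based (float end_step) cases that the real helper also handles.

# Hedged, standalone restatement of the linear scalar schedule used by C().
def scheduled_value(spec, global_step):
    """spec = [start_step, start_value, end_value, end_step] with an int end_step."""
    if isinstance(spec, (int, float)):
        return float(spec)                     # constants pass through unchanged
    start_step, start_value, end_value, end_step = spec
    t = min(1.0, max(0.0, (global_step - start_step) / (end_step - start_step)))
    return start_value + (end_value - start_value) * t

# e.g. ramp a loss weight from 0 to 1000 between steps 0 and 5000
print(scheduled_value([0, 0.0, 1000.0, 5000], 2500))   # -> 500.0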
rehg-lab/RAVE
annotator/oneformer/detectron2/modeling/roi_heads/fast_rcnn.py
[ { "identifier": "configurable", "path": "annotator/oneformer/detectron2/config/config.py", "snippet": "def configurable(init_func=None, *, from_config=None):\r\n \"\"\"\r\n Decorate a function or a class's __init__ method so that it can be called\r\n with a :class:`CfgNode` object using a :func:`from_config` function that translates\r\n :class:`CfgNode` to arguments.\r\n\r\n Examples:\r\n ::\r\n # Usage 1: Decorator on __init__:\r\n class A:\r\n @configurable\r\n def __init__(self, a, b=2, c=3):\r\n pass\r\n\r\n @classmethod\r\n def from_config(cls, cfg): # 'cfg' must be the first argument\r\n # Returns kwargs to be passed to __init__\r\n return {\"a\": cfg.A, \"b\": cfg.B}\r\n\r\n a1 = A(a=1, b=2) # regular construction\r\n a2 = A(cfg) # construct with a cfg\r\n a3 = A(cfg, b=3, c=4) # construct with extra overwrite\r\n\r\n # Usage 2: Decorator on any function. Needs an extra from_config argument:\r\n @configurable(from_config=lambda cfg: {\"a: cfg.A, \"b\": cfg.B})\r\n def a_func(a, b=2, c=3):\r\n pass\r\n\r\n a1 = a_func(a=1, b=2) # regular call\r\n a2 = a_func(cfg) # call with a cfg\r\n a3 = a_func(cfg, b=3, c=4) # call with extra overwrite\r\n\r\n Args:\r\n init_func (callable): a class's ``__init__`` method in usage 1. The\r\n class must have a ``from_config`` classmethod which takes `cfg` as\r\n the first argument.\r\n from_config (callable): the from_config function in usage 2. It must take `cfg`\r\n as its first argument.\r\n \"\"\"\r\n\r\n if init_func is not None:\r\n assert (\r\n inspect.isfunction(init_func)\r\n and from_config is None\r\n and init_func.__name__ == \"__init__\"\r\n ), \"Incorrect use of @configurable. Check API documentation for examples.\"\r\n\r\n @functools.wraps(init_func)\r\n def wrapped(self, *args, **kwargs):\r\n try:\r\n from_config_func = type(self).from_config\r\n except AttributeError as e:\r\n raise AttributeError(\r\n \"Class with @configurable must have a 'from_config' classmethod.\"\r\n ) from e\r\n if not inspect.ismethod(from_config_func):\r\n raise TypeError(\"Class with @configurable must have a 'from_config' classmethod.\")\r\n\r\n if _called_with_cfg(*args, **kwargs):\r\n explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)\r\n init_func(self, **explicit_args)\r\n else:\r\n init_func(self, *args, **kwargs)\r\n\r\n return wrapped\r\n\r\n else:\r\n if from_config is None:\r\n return configurable # @configurable() is made equivalent to @configurable\r\n assert inspect.isfunction(\r\n from_config\r\n ), \"from_config argument of configurable must be a function!\"\r\n\r\n def wrapper(orig_func):\r\n @functools.wraps(orig_func)\r\n def wrapped(*args, **kwargs):\r\n if _called_with_cfg(*args, **kwargs):\r\n explicit_args = _get_args_from_config(from_config, *args, **kwargs)\r\n return orig_func(**explicit_args)\r\n else:\r\n return orig_func(*args, **kwargs)\r\n\r\n wrapped.from_config = from_config\r\n return wrapped\r\n\r\n return wrapper\r" }, { "identifier": "get_fed_loss_cls_weights", "path": "annotator/oneformer/detectron2/data/detection_utils.py", "snippet": "def get_fed_loss_cls_weights(dataset_names: Union[str, List[str]], freq_weight_power=1.0):\r\n \"\"\"\r\n Get frequency weight for each class sorted by class id.\r\n We now calcualte freqency weight using image_count to the power freq_weight_power.\r\n\r\n Args:\r\n dataset_names: list of dataset names\r\n freq_weight_power: power value\r\n \"\"\"\r\n if isinstance(dataset_names, str):\r\n dataset_names = [dataset_names]\r\n\r\n 
check_metadata_consistency(\"class_image_count\", dataset_names)\r\n\r\n meta = MetadataCatalog.get(dataset_names[0])\r\n class_freq_meta = meta.class_image_count\r\n class_freq = torch.tensor(\r\n [c[\"image_count\"] for c in sorted(class_freq_meta, key=lambda x: x[\"id\"])]\r\n )\r\n class_freq_weight = class_freq.float() ** freq_weight_power\r\n return class_freq_weight\r" }, { "identifier": "batched_nms", "path": "annotator/oneformer/detectron2/layers/nms.py", "snippet": "def batched_nms(\r\n boxes: torch.Tensor, scores: torch.Tensor, idxs: torch.Tensor, iou_threshold: float\r\n):\r\n \"\"\"\r\n Same as torchvision.ops.boxes.batched_nms, but with float().\r\n \"\"\"\r\n assert boxes.shape[-1] == 4\r\n # Note: Torchvision already has a strategy (https://github.com/pytorch/vision/issues/1311)\r\n # to decide whether to use coordinate trick or for loop to implement batched_nms. So we\r\n # just call it directly.\r\n # Fp16 does not have enough range for batched NMS, so adding float().\r\n return box_ops.batched_nms(boxes.float(), scores, idxs, iou_threshold)\r" }, { "identifier": "ShapeSpec", "path": "annotator/oneformer/detectron2/layers/shape_spec.py", "snippet": "class ShapeSpec:\r\n \"\"\"\r\n A simple structure that contains basic shape specification about a tensor.\r\n It is often used as the auxiliary inputs/outputs of models,\r\n to complement the lack of shape inference ability among pytorch modules.\r\n \"\"\"\r\n\r\n channels: Optional[int] = None\r\n height: Optional[int] = None\r\n width: Optional[int] = None\r\n stride: Optional[int] = None\r" }, { "identifier": "cat", "path": "annotator/oneformer/detectron2/layers/wrappers.py", "snippet": "def shapes_to_tensor(x: List[int], device: Optional[torch.device] = None) -> torch.Tensor:\r\ndef check_if_dynamo_compiling():\r\ndef cat(tensors: List[torch.Tensor], dim: int = 0):\r\ndef empty_input_loss_func_wrapper(loss_func):\r\n def wrapped_loss_func(input, target, *, reduction=\"mean\", **kwargs):\r\n def forward(ctx, x, new_shape):\r\n def backward(ctx, grad):\r\n def __init__(self, *args, **kwargs):\r\n def forward(self, x):\r\ndef nonzero_tuple(x):\r\ndef move_device_like(src: torch.Tensor, dst: torch.Tensor) -> torch.Tensor:\r\nclass _NewEmptyTensorOp(torch.autograd.Function):\r\nclass Conv2d(torch.nn.Conv2d):\r" }, { "identifier": "Box2BoxTransform", "path": "annotator/oneformer/detectron2/modeling/box_regression.py", "snippet": "class Box2BoxTransform(object):\r\n \"\"\"\r\n The box-to-box transform defined in R-CNN. The transformation is parameterized\r\n by 4 deltas: (dx, dy, dw, dh). The transformation scales the box's width and height\r\n by exp(dw), exp(dh) and shifts a box's center by the offset (dx * width, dy * height).\r\n \"\"\"\r\n\r\n def __init__(\r\n self, weights: Tuple[float, float, float, float], scale_clamp: float = _DEFAULT_SCALE_CLAMP\r\n ):\r\n \"\"\"\r\n Args:\r\n weights (4-element tuple): Scaling factors that are applied to the\r\n (dx, dy, dw, dh) deltas. 
In Fast R-CNN, these were originally set\r\n such that the deltas have unit variance; now they are treated as\r\n hyperparameters of the system.\r\n scale_clamp (float): When predicting deltas, the predicted box scaling\r\n factors (dw and dh) are clamped such that they are <= scale_clamp.\r\n \"\"\"\r\n self.weights = weights\r\n self.scale_clamp = scale_clamp\r\n\r\n def get_deltas(self, src_boxes, target_boxes):\r\n \"\"\"\r\n Get box regression transformation deltas (dx, dy, dw, dh) that can be used\r\n to transform the `src_boxes` into the `target_boxes`. That is, the relation\r\n ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless\r\n any delta is too large and is clamped).\r\n\r\n Args:\r\n src_boxes (Tensor): source boxes, e.g., object proposals\r\n target_boxes (Tensor): target of the transformation, e.g., ground-truth\r\n boxes.\r\n \"\"\"\r\n assert isinstance(src_boxes, torch.Tensor), type(src_boxes)\r\n assert isinstance(target_boxes, torch.Tensor), type(target_boxes)\r\n\r\n src_widths = src_boxes[:, 2] - src_boxes[:, 0]\r\n src_heights = src_boxes[:, 3] - src_boxes[:, 1]\r\n src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths\r\n src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights\r\n\r\n target_widths = target_boxes[:, 2] - target_boxes[:, 0]\r\n target_heights = target_boxes[:, 3] - target_boxes[:, 1]\r\n target_ctr_x = target_boxes[:, 0] + 0.5 * target_widths\r\n target_ctr_y = target_boxes[:, 1] + 0.5 * target_heights\r\n\r\n wx, wy, ww, wh = self.weights\r\n dx = wx * (target_ctr_x - src_ctr_x) / src_widths\r\n dy = wy * (target_ctr_y - src_ctr_y) / src_heights\r\n dw = ww * torch.log(target_widths / src_widths)\r\n dh = wh * torch.log(target_heights / src_heights)\r\n\r\n deltas = torch.stack((dx, dy, dw, dh), dim=1)\r\n assert (src_widths > 0).all().item(), \"Input boxes to Box2BoxTransform are not valid!\"\r\n return deltas\r\n\r\n def apply_deltas(self, deltas, boxes):\r\n \"\"\"\r\n Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`.\r\n\r\n Args:\r\n deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1.\r\n deltas[i] represents k potentially different class-specific\r\n box transformations for the single box boxes[i].\r\n boxes (Tensor): boxes to transform, of shape (N, 4)\r\n \"\"\"\r\n deltas = deltas.float() # ensure fp32 for decoding precision\r\n boxes = boxes.to(deltas.dtype)\r\n\r\n widths = boxes[:, 2] - boxes[:, 0]\r\n heights = boxes[:, 3] - boxes[:, 1]\r\n ctr_x = boxes[:, 0] + 0.5 * widths\r\n ctr_y = boxes[:, 1] + 0.5 * heights\r\n\r\n wx, wy, ww, wh = self.weights\r\n dx = deltas[:, 0::4] / wx\r\n dy = deltas[:, 1::4] / wy\r\n dw = deltas[:, 2::4] / ww\r\n dh = deltas[:, 3::4] / wh\r\n\r\n # Prevent sending too large values into torch.exp()\r\n dw = torch.clamp(dw, max=self.scale_clamp)\r\n dh = torch.clamp(dh, max=self.scale_clamp)\r\n\r\n pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]\r\n pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]\r\n pred_w = torch.exp(dw) * widths[:, None]\r\n pred_h = torch.exp(dh) * heights[:, None]\r\n\r\n x1 = pred_ctr_x - 0.5 * pred_w\r\n y1 = pred_ctr_y - 0.5 * pred_h\r\n x2 = pred_ctr_x + 0.5 * pred_w\r\n y2 = pred_ctr_y + 0.5 * pred_h\r\n pred_boxes = torch.stack((x1, y1, x2, y2), dim=-1)\r\n return pred_boxes.reshape(deltas.shape)\r" }, { "identifier": "_dense_box_regression_loss", "path": "annotator/oneformer/detectron2/modeling/box_regression.py", "snippet": "def _dense_box_regression_loss(\r\n anchors: List[Union[Boxes, torch.Tensor]],\r\n box2box_transform: 
Box2BoxTransform,\r\n pred_anchor_deltas: List[torch.Tensor],\r\n gt_boxes: List[torch.Tensor],\r\n fg_mask: torch.Tensor,\r\n box_reg_loss_type=\"smooth_l1\",\r\n smooth_l1_beta=0.0,\r\n):\r\n \"\"\"\r\n Compute loss for dense multi-level box regression.\r\n Loss is accumulated over ``fg_mask``.\r\n\r\n Args:\r\n anchors: #lvl anchor boxes, each is (HixWixA, 4)\r\n pred_anchor_deltas: #lvl predictions, each is (N, HixWixA, 4)\r\n gt_boxes: N ground truth boxes, each has shape (R, 4) (R = sum(Hi * Wi * A))\r\n fg_mask: the foreground boolean mask of shape (N, R) to compute loss on\r\n box_reg_loss_type (str): Loss type to use. Supported losses: \"smooth_l1\", \"giou\",\r\n \"diou\", \"ciou\".\r\n smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. Default to\r\n use L1 loss. Only used when `box_reg_loss_type` is \"smooth_l1\"\r\n \"\"\"\r\n if isinstance(anchors[0], Boxes):\r\n anchors = type(anchors[0]).cat(anchors).tensor # (R, 4)\r\n else:\r\n anchors = cat(anchors)\r\n if box_reg_loss_type == \"smooth_l1\":\r\n gt_anchor_deltas = [box2box_transform.get_deltas(anchors, k) for k in gt_boxes]\r\n gt_anchor_deltas = torch.stack(gt_anchor_deltas) # (N, R, 4)\r\n loss_box_reg = smooth_l1_loss(\r\n cat(pred_anchor_deltas, dim=1)[fg_mask],\r\n gt_anchor_deltas[fg_mask],\r\n beta=smooth_l1_beta,\r\n reduction=\"sum\",\r\n )\r\n elif box_reg_loss_type == \"giou\":\r\n pred_boxes = [\r\n box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)\r\n ]\r\n loss_box_reg = giou_loss(\r\n torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction=\"sum\"\r\n )\r\n elif box_reg_loss_type == \"diou\":\r\n pred_boxes = [\r\n box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)\r\n ]\r\n loss_box_reg = diou_loss(\r\n torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction=\"sum\"\r\n )\r\n elif box_reg_loss_type == \"ciou\":\r\n pred_boxes = [\r\n box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)\r\n ]\r\n loss_box_reg = ciou_loss(\r\n torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction=\"sum\"\r\n )\r\n else:\r\n raise ValueError(f\"Invalid dense box regression loss type '{box_reg_loss_type}'\")\r\n return loss_box_reg\r" }, { "identifier": "Boxes", "path": "annotator/oneformer/detectron2/structures/boxes.py", "snippet": "class Boxes:\r\n \"\"\"\r\n This structure stores a list of boxes as a Nx4 torch.Tensor.\r\n It supports some common methods about boxes\r\n (`area`, `clip`, `nonempty`, etc),\r\n and also behaves like a Tensor\r\n (support indexing, `to(device)`, `.device`, and iteration over all boxes)\r\n\r\n Attributes:\r\n tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).\r\n \"\"\"\r\n\r\n def __init__(self, tensor: torch.Tensor):\r\n \"\"\"\r\n Args:\r\n tensor (Tensor[float]): a Nx4 matrix. 
Each row is (x1, y1, x2, y2).\r\n \"\"\"\r\n if not isinstance(tensor, torch.Tensor):\r\n tensor = torch.as_tensor(tensor, dtype=torch.float32, device=torch.device(\"cpu\"))\r\n else:\r\n tensor = tensor.to(torch.float32)\r\n if tensor.numel() == 0:\r\n # Use reshape, so we don't end up creating a new tensor that does not depend on\r\n # the inputs (and consequently confuses jit)\r\n tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32)\r\n assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()\r\n\r\n self.tensor = tensor\r\n\r\n def clone(self) -> \"Boxes\":\r\n \"\"\"\r\n Clone the Boxes.\r\n\r\n Returns:\r\n Boxes\r\n \"\"\"\r\n return Boxes(self.tensor.clone())\r\n\r\n def to(self, device: torch.device):\r\n # Boxes are assumed float32 and does not support to(dtype)\r\n return Boxes(self.tensor.to(device=device))\r\n\r\n def area(self) -> torch.Tensor:\r\n \"\"\"\r\n Computes the area of all the boxes.\r\n\r\n Returns:\r\n torch.Tensor: a vector with areas of each box.\r\n \"\"\"\r\n box = self.tensor\r\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\r\n return area\r\n\r\n def clip(self, box_size: Tuple[int, int]) -> None:\r\n \"\"\"\r\n Clip (in place) the boxes by limiting x coordinates to the range [0, width]\r\n and y coordinates to the range [0, height].\r\n\r\n Args:\r\n box_size (height, width): The clipping box's size.\r\n \"\"\"\r\n assert torch.isfinite(self.tensor).all(), \"Box tensor contains infinite or NaN!\"\r\n h, w = box_size\r\n x1 = self.tensor[:, 0].clamp(min=0, max=w)\r\n y1 = self.tensor[:, 1].clamp(min=0, max=h)\r\n x2 = self.tensor[:, 2].clamp(min=0, max=w)\r\n y2 = self.tensor[:, 3].clamp(min=0, max=h)\r\n self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)\r\n\r\n def nonempty(self, threshold: float = 0.0) -> torch.Tensor:\r\n \"\"\"\r\n Find boxes that are non-empty.\r\n A box is considered empty, if either of its side is no larger than threshold.\r\n\r\n Returns:\r\n Tensor:\r\n a binary vector which represents whether each box is empty\r\n (False) or non-empty (True).\r\n \"\"\"\r\n box = self.tensor\r\n widths = box[:, 2] - box[:, 0]\r\n heights = box[:, 3] - box[:, 1]\r\n keep = (widths > threshold) & (heights > threshold)\r\n return keep\r\n\r\n def __getitem__(self, item) -> \"Boxes\":\r\n \"\"\"\r\n Args:\r\n item: int, slice, or a BoolTensor\r\n\r\n Returns:\r\n Boxes: Create a new :class:`Boxes` by indexing.\r\n\r\n The following usage are allowed:\r\n\r\n 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.\r\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\r\n 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor\r\n with `length = len(boxes)`. 
Nonzero elements in the vector will be selected.\r\n\r\n Note that the returned Boxes might share storage with this Boxes,\r\n subject to Pytorch's indexing semantics.\r\n \"\"\"\r\n if isinstance(item, int):\r\n return Boxes(self.tensor[item].view(1, -1))\r\n b = self.tensor[item]\r\n assert b.dim() == 2, \"Indexing on Boxes with {} failed to return a matrix!\".format(item)\r\n return Boxes(b)\r\n\r\n def __len__(self) -> int:\r\n return self.tensor.shape[0]\r\n\r\n def __repr__(self) -> str:\r\n return \"Boxes(\" + str(self.tensor) + \")\"\r\n\r\n def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:\r\n \"\"\"\r\n Args:\r\n box_size (height, width): Size of the reference box.\r\n boundary_threshold (int): Boxes that extend beyond the reference box\r\n boundary by more than boundary_threshold are considered \"outside\".\r\n\r\n Returns:\r\n a binary vector, indicating whether each box is inside the reference box.\r\n \"\"\"\r\n height, width = box_size\r\n inds_inside = (\r\n (self.tensor[..., 0] >= -boundary_threshold)\r\n & (self.tensor[..., 1] >= -boundary_threshold)\r\n & (self.tensor[..., 2] < width + boundary_threshold)\r\n & (self.tensor[..., 3] < height + boundary_threshold)\r\n )\r\n return inds_inside\r\n\r\n def get_centers(self) -> torch.Tensor:\r\n \"\"\"\r\n Returns:\r\n The box centers in a Nx2 array of (x, y).\r\n \"\"\"\r\n return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2\r\n\r\n def scale(self, scale_x: float, scale_y: float) -> None:\r\n \"\"\"\r\n Scale the box with horizontal and vertical scaling factors\r\n \"\"\"\r\n self.tensor[:, 0::2] *= scale_x\r\n self.tensor[:, 1::2] *= scale_y\r\n\r\n @classmethod\r\n def cat(cls, boxes_list: List[\"Boxes\"]) -> \"Boxes\":\r\n \"\"\"\r\n Concatenates a list of Boxes into a single Boxes\r\n\r\n Arguments:\r\n boxes_list (list[Boxes])\r\n\r\n Returns:\r\n Boxes: the concatenated Boxes\r\n \"\"\"\r\n assert isinstance(boxes_list, (list, tuple))\r\n if len(boxes_list) == 0:\r\n return cls(torch.empty(0))\r\n assert all([isinstance(box, Boxes) for box in boxes_list])\r\n\r\n # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input\r\n cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))\r\n return cat_boxes\r\n\r\n @property\r\n def device(self) -> device:\r\n return self.tensor.device\r\n\r\n # type \"Iterator[torch.Tensor]\", yield, and iter() not supported by torchscript\r\n # https://github.com/pytorch/pytorch/issues/18627\r\n @torch.jit.unused\r\n def __iter__(self):\r\n \"\"\"\r\n Yield a box as a Tensor of shape (4,) at a time.\r\n \"\"\"\r\n yield from self.tensor\r" }, { "identifier": "Instances", "path": "annotator/oneformer/detectron2/structures/instances.py", "snippet": "class Instances:\r\n \"\"\"\r\n This class represents a list of instances in an image.\r\n It stores the attributes of instances (e.g., boxes, masks, labels, scores) as \"fields\".\r\n All fields must have the same ``__len__`` which is the number of instances.\r\n\r\n All other (non-field) attributes of this class are considered private:\r\n they must start with '_' and are not modifiable by a user.\r\n\r\n Some basic usage:\r\n\r\n 1. Set/get/check a field:\r\n\r\n .. code-block:: python\r\n\r\n instances.gt_boxes = Boxes(...)\r\n print(instances.pred_masks) # a tensor of shape (N, H, W)\r\n print('gt_masks' in instances)\r\n\r\n 2. ``len(instances)`` returns the number of instances\r\n 3. 
Indexing: ``instances[indices]`` will apply the indexing on all the fields\r\n and returns a new :class:`Instances`.\r\n Typically, ``indices`` is a integer vector of indices,\r\n or a binary mask of length ``num_instances``\r\n\r\n .. code-block:: python\r\n\r\n category_3_detections = instances[instances.pred_classes == 3]\r\n confident_detections = instances[instances.scores > 0.9]\r\n \"\"\"\r\n\r\n def __init__(self, image_size: Tuple[int, int], **kwargs: Any):\r\n \"\"\"\r\n Args:\r\n image_size (height, width): the spatial size of the image.\r\n kwargs: fields to add to this `Instances`.\r\n \"\"\"\r\n self._image_size = image_size\r\n self._fields: Dict[str, Any] = {}\r\n for k, v in kwargs.items():\r\n self.set(k, v)\r\n\r\n @property\r\n def image_size(self) -> Tuple[int, int]:\r\n \"\"\"\r\n Returns:\r\n tuple: height, width\r\n \"\"\"\r\n return self._image_size\r\n\r\n def __setattr__(self, name: str, val: Any) -> None:\r\n if name.startswith(\"_\"):\r\n super().__setattr__(name, val)\r\n else:\r\n self.set(name, val)\r\n\r\n def __getattr__(self, name: str) -> Any:\r\n if name == \"_fields\" or name not in self._fields:\r\n raise AttributeError(\"Cannot find field '{}' in the given Instances!\".format(name))\r\n return self._fields[name]\r\n\r\n def set(self, name: str, value: Any) -> None:\r\n \"\"\"\r\n Set the field named `name` to `value`.\r\n The length of `value` must be the number of instances,\r\n and must agree with other existing fields in this object.\r\n \"\"\"\r\n with warnings.catch_warnings(record=True):\r\n data_len = len(value)\r\n if len(self._fields):\r\n assert (\r\n len(self) == data_len\r\n ), \"Adding a field of length {} to a Instances of length {}\".format(data_len, len(self))\r\n self._fields[name] = value\r\n\r\n def has(self, name: str) -> bool:\r\n \"\"\"\r\n Returns:\r\n bool: whether the field called `name` exists.\r\n \"\"\"\r\n return name in self._fields\r\n\r\n def remove(self, name: str) -> None:\r\n \"\"\"\r\n Remove the field called `name`.\r\n \"\"\"\r\n del self._fields[name]\r\n\r\n def get(self, name: str) -> Any:\r\n \"\"\"\r\n Returns the field called `name`.\r\n \"\"\"\r\n return self._fields[name]\r\n\r\n def get_fields(self) -> Dict[str, Any]:\r\n \"\"\"\r\n Returns:\r\n dict: a dict which maps names (str) to data of the fields\r\n\r\n Modifying the returned dict will modify this instance.\r\n \"\"\"\r\n return self._fields\r\n\r\n # Tensor-like methods\r\n def to(self, *args: Any, **kwargs: Any) -> \"Instances\":\r\n \"\"\"\r\n Returns:\r\n Instances: all fields are called with a `to(device)`, if the field has this method.\r\n \"\"\"\r\n ret = Instances(self._image_size)\r\n for k, v in self._fields.items():\r\n if hasattr(v, \"to\"):\r\n v = v.to(*args, **kwargs)\r\n ret.set(k, v)\r\n return ret\r\n\r\n def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> \"Instances\":\r\n \"\"\"\r\n Args:\r\n item: an index-like object and will be used to index all the fields.\r\n\r\n Returns:\r\n If `item` is a string, return the data in the corresponding field.\r\n Otherwise, returns an `Instances` where all fields are indexed by `item`.\r\n \"\"\"\r\n if type(item) == int:\r\n if item >= len(self) or item < -len(self):\r\n raise IndexError(\"Instances index out of range!\")\r\n else:\r\n item = slice(item, None, len(self))\r\n\r\n ret = Instances(self._image_size)\r\n for k, v in self._fields.items():\r\n ret.set(k, v[item])\r\n return ret\r\n\r\n def __len__(self) -> int:\r\n for v in self._fields.values():\r\n # use 
__len__ because len() has to be int and is not friendly to tracing\r\n return v.__len__()\r\n raise NotImplementedError(\"Empty Instances does not support __len__!\")\r\n\r\n def __iter__(self):\r\n raise NotImplementedError(\"`Instances` object is not iterable!\")\r\n\r\n @staticmethod\r\n def cat(instance_lists: List[\"Instances\"]) -> \"Instances\":\r\n \"\"\"\r\n Args:\r\n instance_lists (list[Instances])\r\n\r\n Returns:\r\n Instances\r\n \"\"\"\r\n assert all(isinstance(i, Instances) for i in instance_lists)\r\n assert len(instance_lists) > 0\r\n if len(instance_lists) == 1:\r\n return instance_lists[0]\r\n\r\n image_size = instance_lists[0].image_size\r\n if not isinstance(image_size, torch.Tensor): # could be a tensor in tracing\r\n for i in instance_lists[1:]:\r\n assert i.image_size == image_size\r\n ret = Instances(image_size)\r\n for k in instance_lists[0]._fields.keys():\r\n values = [i.get(k) for i in instance_lists]\r\n v0 = values[0]\r\n if isinstance(v0, torch.Tensor):\r\n values = torch.cat(values, dim=0)\r\n elif isinstance(v0, list):\r\n values = list(itertools.chain(*values))\r\n elif hasattr(type(v0), \"cat\"):\r\n values = type(v0).cat(values)\r\n else:\r\n raise ValueError(\"Unsupported type {} for concatenation\".format(type(v0)))\r\n ret.set(k, values)\r\n return ret\r\n\r\n def __str__(self) -> str:\r\n s = self.__class__.__name__ + \"(\"\r\n s += \"num_instances={}, \".format(len(self))\r\n s += \"image_height={}, \".format(self._image_size[0])\r\n s += \"image_width={}, \".format(self._image_size[1])\r\n s += \"fields=[{}])\".format(\", \".join((f\"{k}: {v}\" for k, v in self._fields.items())))\r\n return s\r\n\r\n __repr__ = __str__\r" }, { "identifier": "get_event_storage", "path": "annotator/oneformer/detectron2/utils/events.py", "snippet": "def get_event_storage():\r\n \"\"\"\r\n Returns:\r\n The :class:`EventStorage` object that's currently being used.\r\n Throws an error if no :class:`EventStorage` is currently enabled.\r\n \"\"\"\r\n assert len(\r\n _CURRENT_STORAGE_STACK\r\n ), \"get_event_storage() has to be called inside a 'with EventStorage(...)' context!\"\r\n return _CURRENT_STORAGE_STACK[-1]\r" } ]
import logging import torch from typing import Callable, Dict, List, Optional, Tuple, Union from torch import nn from torch.nn import functional as F from annotator.oneformer.detectron2.config import configurable from annotator.oneformer.detectron2.data.detection_utils import get_fed_loss_cls_weights from annotator.oneformer.detectron2.layers import ShapeSpec, batched_nms, cat, cross_entropy, nonzero_tuple from annotator.oneformer.detectron2.modeling.box_regression import Box2BoxTransform, _dense_box_regression_loss from annotator.oneformer.detectron2.structures import Boxes, Instances from annotator.oneformer.detectron2.utils.events import get_event_storage
9977
filter_mask = scores > score_thresh # R x K # R' x 2. First column contains indices of the R predictions; # Second column contains indices of classes. filter_inds = filter_mask.nonzero() if num_bbox_reg_classes == 1: boxes = boxes[filter_inds[:, 0], 0] else: boxes = boxes[filter_mask] scores = scores[filter_mask] # 2. Apply NMS for each class independently. keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh) if topk_per_image >= 0: keep = keep[:topk_per_image] boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep] result = Instances(image_shape) result.pred_boxes = Boxes(boxes) result.scores = scores result.pred_classes = filter_inds[:, 1] return result, filter_inds[:, 0] class FastRCNNOutputLayers(nn.Module): """ Two linear layers for predicting Fast R-CNN outputs: 1. proposal-to-detection box regression deltas 2. classification scores """ @configurable def __init__( self, input_shape: ShapeSpec, *, box2box_transform, num_classes: int, test_score_thresh: float = 0.0, test_nms_thresh: float = 0.5, test_topk_per_image: int = 100, cls_agnostic_bbox_reg: bool = False, smooth_l1_beta: float = 0.0, box_reg_loss_type: str = "smooth_l1", loss_weight: Union[float, Dict[str, float]] = 1.0, use_fed_loss: bool = False, use_sigmoid_ce: bool = False, get_fed_loss_cls_weights: Optional[Callable] = None, fed_loss_num_classes: int = 50, ): """ NOTE: this interface is experimental. Args: input_shape (ShapeSpec): shape of the input feature to this module box2box_transform (Box2BoxTransform or Box2BoxTransformRotated): num_classes (int): number of foreground classes test_score_thresh (float): threshold to filter predictions results. test_nms_thresh (float): NMS threshold for prediction results. test_topk_per_image (int): number of top predictions to produce per image. cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression smooth_l1_beta (float): transition point from L1 to L2 loss. Only used if `box_reg_loss_type` is "smooth_l1" box_reg_loss_type (str): Box regression loss type. One of: "smooth_l1", "giou", "diou", "ciou" loss_weight (float|dict): weights to use for losses. Can be single float for weighting all losses, or a dict of individual weightings. Valid dict keys are: * "loss_cls": applied to classification loss * "loss_box_reg": applied to box regression loss use_fed_loss (bool): whether to use federated loss which samples additional negative classes to calculate the loss use_sigmoid_ce (bool): whether to calculate the loss using weighted average of binary cross entropy with logits. This could be used together with federated loss get_fed_loss_cls_weights (Callable): a callable which takes dataset name and frequency weight power, and returns the probabilities to sample negative classes for federated loss. 
The implementation can be found in detectron2/data/detection_utils.py fed_loss_num_classes (int): number of federated classes to keep in total """ super().__init__() if isinstance(input_shape, int): # some backward compatibility input_shape = ShapeSpec(channels=input_shape) self.num_classes = num_classes input_size = input_shape.channels * (input_shape.width or 1) * (input_shape.height or 1) # prediction layer for num_classes foreground classes and one background class (hence + 1) self.cls_score = nn.Linear(input_size, num_classes + 1) num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes box_dim = len(box2box_transform.weights) self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim) nn.init.normal_(self.cls_score.weight, std=0.01) nn.init.normal_(self.bbox_pred.weight, std=0.001) for l in [self.cls_score, self.bbox_pred]: nn.init.constant_(l.bias, 0) self.box2box_transform = box2box_transform self.smooth_l1_beta = smooth_l1_beta self.test_score_thresh = test_score_thresh self.test_nms_thresh = test_nms_thresh self.test_topk_per_image = test_topk_per_image self.box_reg_loss_type = box_reg_loss_type if isinstance(loss_weight, float): loss_weight = {"loss_cls": loss_weight, "loss_box_reg": loss_weight} self.loss_weight = loss_weight self.use_fed_loss = use_fed_loss self.use_sigmoid_ce = use_sigmoid_ce self.fed_loss_num_classes = fed_loss_num_classes if self.use_fed_loss: assert self.use_sigmoid_ce, "Please use sigmoid cross entropy loss with federated loss" fed_loss_cls_weights = get_fed_loss_cls_weights() assert ( len(fed_loss_cls_weights) == self.num_classes ), "Please check the provided fed_loss_cls_weights. Their size should match num_classes" self.register_buffer("fed_loss_cls_weights", fed_loss_cls_weights) @classmethod def from_config(cls, cfg, input_shape): return { "input_shape": input_shape,
# Copyright (c) Facebook, Inc. and its affiliates. __all__ = ["fast_rcnn_inference", "FastRCNNOutputLayers"] logger = logging.getLogger(__name__) """ Shape shorthand in this module: N: number of images in the minibatch R: number of ROIs, combined over all images, in the minibatch Ri: number of ROIs in image i K: number of foreground classes. E.g.,there are 80 foreground classes in COCO. Naming convention: deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box transform (see :class:`box_regression.Box2BoxTransform`). pred_class_logits: predicted class scores in [-inf, +inf]; use softmax(pred_class_logits) to estimate P(class). gt_classes: ground-truth classification labels in [0, K], where [0, K) represent foreground object classes and K represents the background class. pred_proposal_deltas: predicted box2box transform deltas for transforming proposals to detection box predictions. gt_proposal_deltas: ground-truth box2box transform deltas """ def fast_rcnn_inference( boxes: List[torch.Tensor], scores: List[torch.Tensor], image_shapes: List[Tuple[int, int]], score_thresh: float, nms_thresh: float, topk_per_image: int, ): """ Call `fast_rcnn_inference_single_image` for all images. Args: boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic boxes for each image. Element i has shape (Ri, K * 4) if doing class-specific regression, or (Ri, 4) if doing class-agnostic regression, where Ri is the number of predicted objects for image i. This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`. scores (list[Tensor]): A list of Tensors of predicted class scores for each image. Element i has shape (Ri, K + 1), where Ri is the number of predicted objects for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`. image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch. score_thresh (float): Only return detections with a confidence score exceeding this threshold. nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1]. topk_per_image (int): The number of top scoring detections to return. Set < 0 to return all detections. Returns: instances: (list[Instances]): A list of N instances, one for each image in the batch, that stores the topk most confidence detections. kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates the corresponding boxes/scores index in [0, Ri) from the input, for image i. """ result_per_image = [ fast_rcnn_inference_single_image( boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image ) for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes) ] return [x[0] for x in result_per_image], [x[1] for x in result_per_image] def _log_classification_stats(pred_logits, gt_classes, prefix="fast_rcnn"): """ Log the classification metrics to EventStorage. Args: pred_logits: Rx(K+1) logits. The last column is for background class. 
gt_classes: R labels """ num_instances = gt_classes.numel() if num_instances == 0: return pred_classes = pred_logits.argmax(dim=1) bg_class_ind = pred_logits.shape[1] - 1 fg_inds = (gt_classes >= 0) & (gt_classes < bg_class_ind) num_fg = fg_inds.nonzero().numel() fg_gt_classes = gt_classes[fg_inds] fg_pred_classes = pred_classes[fg_inds] num_false_negative = (fg_pred_classes == bg_class_ind).nonzero().numel() num_accurate = (pred_classes == gt_classes).nonzero().numel() fg_num_accurate = (fg_pred_classes == fg_gt_classes).nonzero().numel() storage = get_event_storage() storage.put_scalar(f"{prefix}/cls_accuracy", num_accurate / num_instances) if num_fg > 0: storage.put_scalar(f"{prefix}/fg_cls_accuracy", fg_num_accurate / num_fg) storage.put_scalar(f"{prefix}/false_negative", num_false_negative / num_fg) def fast_rcnn_inference_single_image( boxes, scores, image_shape: Tuple[int, int], score_thresh: float, nms_thresh: float, topk_per_image: int, ): """ Single-image inference. Return bounding-box detection results by thresholding on scores and applying non-maximum suppression (NMS). Args: Same as `fast_rcnn_inference`, but with boxes, scores, and image shapes per image. Returns: Same as `fast_rcnn_inference`, but for only one image. """ valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1) if not valid_mask.all(): boxes = boxes[valid_mask] scores = scores[valid_mask] scores = scores[:, :-1] num_bbox_reg_classes = boxes.shape[1] // 4 # Convert to Boxes to use the `clip` function ... boxes = Boxes(boxes.reshape(-1, 4)) boxes.clip(image_shape) boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4) # R x C x 4 # 1. Filter results based on detection scores. It can make NMS more efficient # by filtering out low-confidence detections. filter_mask = scores > score_thresh # R x K # R' x 2. First column contains indices of the R predictions; # Second column contains indices of classes. filter_inds = filter_mask.nonzero() if num_bbox_reg_classes == 1: boxes = boxes[filter_inds[:, 0], 0] else: boxes = boxes[filter_mask] scores = scores[filter_mask] # 2. Apply NMS for each class independently. keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh) if topk_per_image >= 0: keep = keep[:topk_per_image] boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep] result = Instances(image_shape) result.pred_boxes = Boxes(boxes) result.scores = scores result.pred_classes = filter_inds[:, 1] return result, filter_inds[:, 0] class FastRCNNOutputLayers(nn.Module): """ Two linear layers for predicting Fast R-CNN outputs: 1. proposal-to-detection box regression deltas 2. classification scores """ @configurable def __init__( self, input_shape: ShapeSpec, *, box2box_transform, num_classes: int, test_score_thresh: float = 0.0, test_nms_thresh: float = 0.5, test_topk_per_image: int = 100, cls_agnostic_bbox_reg: bool = False, smooth_l1_beta: float = 0.0, box_reg_loss_type: str = "smooth_l1", loss_weight: Union[float, Dict[str, float]] = 1.0, use_fed_loss: bool = False, use_sigmoid_ce: bool = False, get_fed_loss_cls_weights: Optional[Callable] = None, fed_loss_num_classes: int = 50, ): """ NOTE: this interface is experimental. Args: input_shape (ShapeSpec): shape of the input feature to this module box2box_transform (Box2BoxTransform or Box2BoxTransformRotated): num_classes (int): number of foreground classes test_score_thresh (float): threshold to filter predictions results. test_nms_thresh (float): NMS threshold for prediction results. 
test_topk_per_image (int): number of top predictions to produce per image. cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression smooth_l1_beta (float): transition point from L1 to L2 loss. Only used if `box_reg_loss_type` is "smooth_l1" box_reg_loss_type (str): Box regression loss type. One of: "smooth_l1", "giou", "diou", "ciou" loss_weight (float|dict): weights to use for losses. Can be single float for weighting all losses, or a dict of individual weightings. Valid dict keys are: * "loss_cls": applied to classification loss * "loss_box_reg": applied to box regression loss use_fed_loss (bool): whether to use federated loss which samples additional negative classes to calculate the loss use_sigmoid_ce (bool): whether to calculate the loss using weighted average of binary cross entropy with logits. This could be used together with federated loss get_fed_loss_cls_weights (Callable): a callable which takes dataset name and frequency weight power, and returns the probabilities to sample negative classes for federated loss. The implementation can be found in detectron2/data/detection_utils.py fed_loss_num_classes (int): number of federated classes to keep in total """ super().__init__() if isinstance(input_shape, int): # some backward compatibility input_shape = ShapeSpec(channels=input_shape) self.num_classes = num_classes input_size = input_shape.channels * (input_shape.width or 1) * (input_shape.height or 1) # prediction layer for num_classes foreground classes and one background class (hence + 1) self.cls_score = nn.Linear(input_size, num_classes + 1) num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes box_dim = len(box2box_transform.weights) self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim) nn.init.normal_(self.cls_score.weight, std=0.01) nn.init.normal_(self.bbox_pred.weight, std=0.001) for l in [self.cls_score, self.bbox_pred]: nn.init.constant_(l.bias, 0) self.box2box_transform = box2box_transform self.smooth_l1_beta = smooth_l1_beta self.test_score_thresh = test_score_thresh self.test_nms_thresh = test_nms_thresh self.test_topk_per_image = test_topk_per_image self.box_reg_loss_type = box_reg_loss_type if isinstance(loss_weight, float): loss_weight = {"loss_cls": loss_weight, "loss_box_reg": loss_weight} self.loss_weight = loss_weight self.use_fed_loss = use_fed_loss self.use_sigmoid_ce = use_sigmoid_ce self.fed_loss_num_classes = fed_loss_num_classes if self.use_fed_loss: assert self.use_sigmoid_ce, "Please use sigmoid cross entropy loss with federated loss" fed_loss_cls_weights = get_fed_loss_cls_weights() assert ( len(fed_loss_cls_weights) == self.num_classes ), "Please check the provided fed_loss_cls_weights. Their size should match num_classes" self.register_buffer("fed_loss_cls_weights", fed_loss_cls_weights) @classmethod def from_config(cls, cfg, input_shape): return { "input_shape": input_shape,
"box2box_transform": Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS),
5
2023-12-05 02:51:53+00:00
12k
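The target completion of the record above constructs a Box2BoxTransform from the config's BBOX_REG_WEIGHTS. As a minimal, self-contained sketch (assumptions: the weight values and sample boxes below are illustrative only, and the two helpers simply restate the (dx, dy, dw, dh) encode/decode formulas quoted in the Box2BoxTransform snippet in plain PyTorch):

# Standalone restatement of the (dx, dy, dw, dh) box parameterization from the
# Box2BoxTransform snippet above. Weights and sample boxes are illustrative.
import torch

def get_deltas(src_boxes, target_boxes, weights=(10.0, 10.0, 5.0, 5.0)):
    # boxes are (N, 4) tensors in (x1, y1, x2, y2) format
    wx, wy, ww, wh = weights
    src_w = src_boxes[:, 2] - src_boxes[:, 0]
    src_h = src_boxes[:, 3] - src_boxes[:, 1]
    src_cx = src_boxes[:, 0] + 0.5 * src_w
    src_cy = src_boxes[:, 1] + 0.5 * src_h
    tgt_w = target_boxes[:, 2] - target_boxes[:, 0]
    tgt_h = target_boxes[:, 3] - target_boxes[:, 1]
    tgt_cx = target_boxes[:, 0] + 0.5 * tgt_w
    tgt_cy = target_boxes[:, 1] + 0.5 * tgt_h
    dx = wx * (tgt_cx - src_cx) / src_w
    dy = wy * (tgt_cy - src_cy) / src_h
    dw = ww * torch.log(tgt_w / src_w)
    dh = wh * torch.log(tgt_h / src_h)
    return torch.stack((dx, dy, dw, dh), dim=1)

def apply_deltas(deltas, boxes, weights=(10.0, 10.0, 5.0, 5.0)):
    # invert get_deltas: shift the center and rescale width/height
    wx, wy, ww, wh = weights
    w = boxes[:, 2] - boxes[:, 0]
    h = boxes[:, 3] - boxes[:, 1]
    cx = boxes[:, 0] + 0.5 * w
    cy = boxes[:, 1] + 0.5 * h
    pred_cx = deltas[:, 0] / wx * w + cx
    pred_cy = deltas[:, 1] / wy * h + cy
    pred_w = torch.exp(deltas[:, 2] / ww) * w
    pred_h = torch.exp(deltas[:, 3] / wh) * h
    return torch.stack((pred_cx - 0.5 * pred_w,
                        pred_cy - 0.5 * pred_h,
                        pred_cx + 0.5 * pred_w,
                        pred_cy + 0.5 * pred_h), dim=1)

# Round trip: decoding the encoded deltas recovers the target boxes.
src = torch.tensor([[10.0, 10.0, 50.0, 60.0]])
tgt = torch.tensor([[12.0, 8.0, 55.0, 70.0]])
deltas = get_deltas(src, tgt)
assert torch.allclose(apply_deltas(deltas, src), tgt, atol=1e-4)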
DiffusionLight/DiffusionLight
inpaint.py
[ { "identifier": "BallInpainter", "path": "relighting/inpainter.py", "snippet": "class BallInpainter():\n def __init__(self, pipeline, sd_arch, control_generator, disable_water_mask=True):\n self.pipeline = pipeline\n self.sd_arch = sd_arch\n self.control_generator = control_generator\n self.median = {}\n if disable_water_mask:\n self._disable_water_mask()\n\n def _disable_water_mask(self):\n if hasattr(self.pipeline, \"watermark\"):\n self.pipeline.watermark = NoWaterMark()\n print(\"Disabled watermasking\")\n\n @classmethod\n def from_sd(cls, \n model, \n controlnet=None, \n device=0, \n sampler=\"unipc\", \n torch_dtype=torch.float16,\n disable_water_mask=True,\n offload=False\n ):\n if controlnet is not None:\n control_signal_type = get_control_signal_type(controlnet)\n controlnet = ControlNetModel.from_pretrained(controlnet, torch_dtype=torch.float16)\n pipe = CustomStableDiffusionControlNetInpaintPipeline.from_pretrained(\n model,\n controlnet=controlnet,\n torch_dtype=torch_dtype,\n ).to(device)\n control_generator = ControlSignalGenerator(\"sd\", control_signal_type, device=device)\n else:\n pipe = CustomStableDiffusionInpaintPipeline.from_pretrained(\n model,\n torch_dtype=torch_dtype,\n ).to(device)\n control_generator = None\n \n try:\n if torch_dtype==torch.float16 and device != torch.device(\"cpu\"):\n pipe.enable_xformers_memory_efficient_attention()\n except:\n pass\n pipe.set_progress_bar_config(disable=True)\n \n pipe.scheduler = SAMPLERS[sampler].from_config(pipe.scheduler.config)\n \n return BallInpainter(pipe, \"sd\", control_generator, disable_water_mask)\n\n @classmethod\n def from_sdxl(cls, \n model, \n controlnet=None, \n device=0, \n sampler=\"unipc\", \n torch_dtype=torch.float16,\n disable_water_mask=True,\n use_fixed_vae=True,\n offload=False\n ):\n vae = VAE_MODELS[\"sdxl\"]\n vae = AutoencoderKL.from_pretrained(vae, torch_dtype=torch_dtype).to(device) if use_fixed_vae else None\n extra_kwargs = {\"vae\": vae} if vae is not None else {}\n \n if controlnet is not None:\n control_signal_type = get_control_signal_type(controlnet)\n controlnet = ControlNetModel.from_pretrained(\n controlnet,\n variant=\"fp16\" if torch_dtype == torch.float16 else None,\n use_safetensors=True,\n torch_dtype=torch_dtype,\n ).to(device)\n pipe = CustomStableDiffusionXLControlNetInpaintPipeline.from_pretrained(\n model,\n controlnet=controlnet,\n variant=\"fp16\" if torch_dtype == torch.float16 else None,\n use_safetensors=True,\n torch_dtype=torch_dtype,\n **extra_kwargs,\n ).to(device)\n control_generator = ControlSignalGenerator(\"sdxl\", control_signal_type, device=device)\n else:\n pipe = CustomStableDiffusionXLInpaintPipeline.from_pretrained(\n model,\n variant=\"fp16\" if torch_dtype == torch.float16 else None,\n use_safetensors=True,\n torch_dtype=torch_dtype,\n **extra_kwargs,\n ).to(device)\n control_generator = None\n \n try:\n if torch_dtype==torch.float16 and device != torch.device(\"cpu\"):\n pipe.enable_xformers_memory_efficient_attention()\n except:\n pass\n \n if offload and device != torch.device(\"cpu\"):\n pipe.enable_model_cpu_offload()\n pipe.set_progress_bar_config(disable=True)\n pipe.scheduler = SAMPLERS[sampler].from_config(pipe.scheduler.config)\n \n return BallInpainter(pipe, \"sdxl\", control_generator, disable_water_mask)\n\n # TODO: this method should be replaced by inpaint(), but we'll leave it here for now\n # otherwise, the existing experiment code will break down\n def __call__(self, *args, **kwargs):\n return self.pipeline(*args, **kwargs)\n\n def 
_default_height_width(self, height=None, width=None):\n if (height is not None) and (width is not None):\n return height, width\n if self.sd_arch == \"sd\":\n return (512, 512)\n elif self.sd_arch == \"sdxl\":\n return (1024, 1024)\n else:\n raise NotImplementedError\n\n # this method is for sanity check only\n def get_cache_control_image(self):\n control_image = getattr(self, \"cache_control_image\", None)\n return control_image\n\n def prepare_control_signal(self, image, controlnet_conditioning_scale, extra_kwargs):\n if self.control_generator is not None:\n control_image = self.control_generator(image, **extra_kwargs)\n controlnet_kwargs = {\n \"control_image\": control_image,\n \"controlnet_conditioning_scale\": controlnet_conditioning_scale\n }\n self.cache_control_image = control_image\n else:\n controlnet_kwargs = {}\n\n return controlnet_kwargs\n\n def get_cache_median(self, it):\n if it in self.median: return self.median[it]\n else: return None\n\n def reset_median(self):\n self.median = {}\n print(\"Reset median\")\n\n def load_median(self, path):\n if os.path.exists(path):\n with open(path, \"rb\") as f:\n self.median = pickle.load(f)\n print(f\"Loaded median from {path}\")\n else:\n print(f\"Median not found at {path}!\")\n\n def inpaint_iterative(\n self,\n prompt=None,\n negative_prompt=\"\",\n num_inference_steps=30,\n generator=None, # TODO: remove this\n image=None,\n mask_image=None,\n height=None,\n width=None,\n controlnet_conditioning_scale=0.5,\n num_images_per_prompt=1,\n current_seed=0,\n cross_attention_kwargs={},\n strength=0.8,\n num_iteration=2,\n ball_per_iteration=30,\n agg_mode=\"median\",\n save_intermediate=True,\n cache_dir=\"./temp_inpaint_iterative\",\n disable_progress=False,\n prompt_embeds=None,\n pooled_prompt_embeds=None,\n use_cache_median=False,\n **extra_kwargs,\n ):\n\n def computeMedian(ball_images):\n all = np.stack(ball_images, axis=0)\n median = np.median(all, axis=0)\n idx_median = np.argsort(all, axis=0)[all.shape[0]//2]\n # print(all.shape)\n # print(idx_median.shape)\n return median, idx_median\n\n def generate_balls(avg_image, current_strength, ball_per_iteration, current_iteration):\n print(f\"Inpainting balls for {current_iteration} iteration...\")\n controlnet_kwargs = self.prepare_control_signal(\n image=avg_image,\n controlnet_conditioning_scale=controlnet_conditioning_scale,\n extra_kwargs=extra_kwargs,\n )\n\n ball_images = []\n for i in tqdm(range(ball_per_iteration), disable=disable_progress):\n seed = current_seed + i\n new_generator = torch.Generator().manual_seed(seed)\n\n output_image = self.pipeline(\n prompt=prompt,\n negative_prompt=negative_prompt,\n num_inference_steps=num_inference_steps,\n generator=new_generator,\n image=avg_image,\n mask_image=mask_image,\n height=height,\n width=width,\n num_images_per_prompt=num_images_per_prompt,\n strength=current_strength,\n newx=x,\n newy=y,\n newr=r,\n current_seed=seed,\n cross_attention_kwargs=cross_attention_kwargs,\n prompt_embeds=prompt_embeds,\n pooled_prompt_embeds=pooled_prompt_embeds,\n **controlnet_kwargs\n ).images[0]\n \n ball_image = crop_ball(output_image, mask_ball_for_crop, x, y, r)\n ball_images.append(ball_image)\n\n if save_intermediate:\n os.makedirs(os.path.join(cache_dir, str(current_iteration)), mode=0o777, exist_ok=True)\n output_image.save(os.path.join(cache_dir, str(current_iteration), f\"raw_{i}.png\"))\n Image.fromarray(ball_image).save(os.path.join(cache_dir, str(current_iteration), f\"ball_{i}.png\"))\n # chmod 777\n 
os.chmod(os.path.join(cache_dir, str(current_iteration), f\"raw_{i}.png\"), 0o0777)\n os.chmod(os.path.join(cache_dir, str(current_iteration), f\"ball_{i}.png\"), 0o0777)\n\n \n return ball_images\n\n if save_intermediate:\n os.makedirs(cache_dir, exist_ok=True)\n\n height, width = self._default_height_width(height, width)\n\n x = extra_kwargs[\"x\"]\n y = extra_kwargs[\"y\"]\n r = 256 if \"r\" not in extra_kwargs else extra_kwargs[\"r\"]\n _, mask_ball_for_crop = get_ideal_normal_ball(size=r)\n \n # generate initial average ball\n avg_image = image\n ball_images = generate_balls(\n avg_image,\n current_strength=1.0,\n ball_per_iteration=ball_per_iteration,\n current_iteration=0,\n )\n\n # ball refinement loop\n image = np.array(image)\n for it in range(1, num_iteration+1):\n if use_cache_median and (self.get_cache_median(it) is not None):\n print(\"Use existing median\")\n all = np.stack(ball_images, axis=0)\n idx_median = self.get_cache_median(it)\n avg_ball = all[idx_median, \n np.arange(idx_median.shape[0])[:, np.newaxis, np.newaxis],\n np.arange(idx_median.shape[1])[np.newaxis, :, np.newaxis],\n np.arange(idx_median.shape[2])[np.newaxis, np.newaxis, :]\n ]\n else:\n avg_ball, idx_median = computeMedian(ball_images)\n print(\"Add new median\")\n self.median[it] = idx_median\n \n avg_image = merge_normal_map(image, avg_ball, mask_ball_for_crop, x, y)\n avg_image = Image.fromarray(avg_image.astype(np.uint8))\n if save_intermediate:\n avg_image.save(os.path.join(cache_dir, f\"average_{it}.png\"))\n # chmod777\n os.chmod(os.path.join(cache_dir, f\"average_{it}.png\"), 0o0777)\n \n ball_images = generate_balls(\n avg_image,\n current_strength=strength,\n ball_per_iteration=ball_per_iteration if it < num_iteration else 1,\n current_iteration=it,\n )\n\n # TODO: add algorithm for select the best ball\n best_ball = ball_images[0]\n output_image = merge_normal_map(image, best_ball, mask_ball_for_crop, x, y)\n return Image.fromarray(output_image.astype(np.uint8))\n\n def inpaint(\n self,\n prompt=None,\n negative_prompt=None,\n num_inference_steps=30,\n generator=None,\n image=None,\n mask_image=None,\n height=None,\n width=None,\n controlnet_conditioning_scale=0.5,\n num_images_per_prompt=1,\n strength=1.0,\n current_seed=0,\n cross_attention_kwargs={},\n prompt_embeds=None,\n pooled_prompt_embeds=None,\n **extra_kwargs,\n ):\n height, width = self._default_height_width(height, width)\n\n controlnet_kwargs = self.prepare_control_signal(\n image=image,\n controlnet_conditioning_scale=controlnet_conditioning_scale,\n extra_kwargs=extra_kwargs,\n )\n \n if generator is None:\n generator = torch.Generator().manual_seed(0)\n\n output_image = self.pipeline(\n prompt=prompt,\n negative_prompt=negative_prompt,\n num_inference_steps=num_inference_steps,\n generator=generator,\n image=image,\n mask_image=mask_image,\n height=height,\n width=width,\n num_images_per_prompt=num_images_per_prompt,\n strength=strength,\n newx = extra_kwargs[\"x\"],\n newy = extra_kwargs[\"y\"],\n newr = getattr(extra_kwargs, \"r\", 256), # default to ball_size = 256\n current_seed=current_seed,\n cross_attention_kwargs=cross_attention_kwargs,\n prompt_embeds=prompt_embeds,\n pooled_prompt_embeds=pooled_prompt_embeds,\n **controlnet_kwargs\n )\n\n return output_image" }, { "identifier": "MaskGenerator", "path": "relighting/mask_utils.py", "snippet": "class MaskGenerator():\n def __init__(self, cache_mask=True):\n self.cache_mask = cache_mask\n self.all_masks = []\n\n def clear_cache(self):\n self.all_masks = []\n\n def 
retrieve_masks(self):\n return self.all_masks\n\n def generate_grid(self, image, mask_ball, n_ball=16, size=128):\n ball_positions = create_grid(image.size, n_ball, size)\n # _, mask_ball = get_normal_ball(size)\n \n masks = []\n mask_template = np.zeros(image.size)\n for x, y in ball_positions:\n mask = mask_template.copy()\n mask[y:y+size, x:x+size] = 255 * mask_ball\n mask = Image.fromarray(mask.astype(np.uint8), \"L\")\n masks.append(mask)\n\n # if self.cache_mask:\n # self.all_masks.append((x, y, size))\n \n return masks, ball_positions\n\n def generate_single(self, image, mask_ball, x, y, size):\n w,h = image.size # numpy as (h,w) but PIL is (w,h)\n mask = np.zeros((h,w))\n mask[y:y+size, x:x+size] = 255 * mask_ball\n mask = Image.fromarray(mask.astype(np.uint8), \"L\")\n\n return mask\n\n def generate_best(self, image, mask_ball, size):\n w,h = image.size # numpy as (h,w) but PIL is (w,h)\n mask = np.zeros((h,w))\n\n (y, x), _ = find_best_location(np.array(image), ball_size=size)\n mask[y:y+size, x:x+size] = 255 * mask_ball\n mask = Image.fromarray(mask.astype(np.uint8), \"L\")\n\n return mask, (x, y)" }, { "identifier": "get_ideal_normal_ball", "path": "relighting/ball_processor.py", "snippet": "def get_ideal_normal_ball(size, flip_x=True):\n \"\"\"\n Generate normal ball for specific size \n Normal map is x \"left\", y up, z into the screen \n (we flip X to match sobel operator)\n @params\n - size (int) - single value of height and width\n @return:\n - normal_map (np.array) - normal map [size, size, 3]\n - mask (np.array) - mask that make a valid normal map [size,size]\n \"\"\"\n # we flip x to match sobel operator\n x = torch.linspace(1, -1, size)\n y = torch.linspace(1, -1, size)\n x = x.flip(dims=(-1,)) if not flip_x else x\n\n y, x = torch.meshgrid(y, x)\n z = (1 - x**2 - y**2)\n mask = z >= 0\n\n # clean up invalid value outsize the mask\n x = x * mask\n y = y * mask\n z = z * mask\n \n # get real z value\n z = torch.sqrt(z)\n \n # clean up normal map value outside mask \n normal_map = torch.cat([x[..., None], y[..., None], z[..., None]], dim=-1)\n normal_map = normal_map.numpy()\n mask = mask.numpy()\n return normal_map, mask" }, { "identifier": "crop_ball", "path": "relighting/ball_processor.py", "snippet": "def crop_ball(image, mask_ball, x, y, size, apply_mask=True, bg_color = (0, 0, 0)):\n if isinstance(image, Image.Image):\n result = np.array(image)\n else:\n result = image.copy()\n \n result = result[y:y+size, x:x+size]\n if apply_mask:\n result[~mask_ball] = bg_color\n return result" }, { "identifier": "GeneralLoader", "path": "relighting/dataset.py", "snippet": "class GeneralLoader(Dataset):\n def __init__(self,\n root=None,\n num_samples=None,\n res_threshold=((1024, 1024)),\n apply_threshold=False,\n random_shuffle=False,\n process_id = 0,\n process_total = 1,\n limit_input = 0,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.root = root\n self.res_threshold = res_threshold\n self.apply_threshold = apply_threshold\n self.has_meta = False\n \n if self.root is not None:\n if not os.path.exists(self.root):\n raise Exception(f\"Dataset {self.root} does not exist.\") \n \n paths = natsorted(\n list(glob.glob(os.path.join(self.root, \"*.png\"))) + \\\n list(glob.glob(os.path.join(self.root, \"*.jpg\")))\n )\n self.scene_data = self._load_data_path(paths, num_samples=num_samples)\n \n if random_shuffle:\n SEED = 0\n random.Random(SEED).shuffle(self.scene_data)\n random.Random(SEED).shuffle(self.boundary_info)\n \n if limit_input > 0:\n self.scene_data = 
self.scene_data[:limit_input]\n self.boundary_info = self.boundary_info[:limit_input]\n \n # please keep this one the last, so, we will filter out scene_data and boundary info\n if process_total > 1:\n self.scene_data = self.scene_data[process_id::process_total]\n self.boundary_info = self.boundary_info[process_id::process_total]\n print(f\"Process {process_id} has {len(self.scene_data)} samples\")\n\n def _load_data_path(self, paths, num_samples=None):\n if os.path.exists(os.path.splitext(paths[0])[0] + \".json\") or os.path.exists(os.path.splitext(paths[-1])[0] + \".json\"):\n self.has_meta = True\n \n if self.has_meta:\n # read metadata\n TARGET_KEY = \"chrome_mask256\"\n for path in paths:\n with open(os.path.splitext(path)[0] + \".json\") as f:\n meta = json.load(f)\n self.meta_data.append(meta)\n boundary = {\n \"x\": meta[TARGET_KEY][\"x\"],\n \"y\": meta[TARGET_KEY][\"y\"],\n \"size\": meta[TARGET_KEY][\"w\"],\n }\n self.boundary_info.append(boundary)\n \n \n scene_data = paths\n if self.apply_threshold:\n scene_data = []\n for path in tqdm(paths):\n img = Image.open(path)\n if (img.size[0] >= self.res_threshold[0]) and (img.size[1] >= self.res_threshold[1]):\n scene_data.append(path)\n \n if num_samples is not None:\n max_idx = min(num_samples, len(scene_data))\n scene_data = scene_data[:max_idx]\n \n return scene_data\n \n @classmethod\n def from_image_paths(cls, paths, *args, **kwargs):\n dataset = cls(*args, **kwargs)\n dataset.scene_data = dataset._load_data_path(paths)\n return dataset" }, { "identifier": "name2hash", "path": "relighting/utils.py", "snippet": "def name2hash(name: str):\n \"\"\"\n @see https://stackoverflow.com/questions/16008670/how-to-hash-a-string-into-8-digits\n \"\"\"\n hash_number = int(hashlib.sha1(name.encode(\"utf-8\")).hexdigest(), 16) % (10 ** 8)\n return hash_number" }, { "identifier": "SD_MODELS", "path": "relighting/argument.py", "snippet": "SD_MODELS = {\n \"sd15_old\": \"runwayml/stable-diffusion-inpainting\",\n \"sd15_new\": \"runwayml/stable-diffusion-inpainting\",\n \"sd21\": \"stabilityai/stable-diffusion-2-inpainting\",\n \"sdxl\": \"stabilityai/stable-diffusion-xl-base-1.0\",\n \"sdxl_fast\": \"stabilityai/stable-diffusion-xl-base-1.0\",\n \"sd15_depth\": \"runwayml/stable-diffusion-inpainting\",\n}" }, { "identifier": "CONTROLNET_MODELS", "path": "relighting/argument.py", "snippet": "CONTROLNET_MODELS = {\n \"sd15_old\": \"fusing/stable-diffusion-v1-5-controlnet-normal\",\n \"sd15_new\": \"lllyasviel/control_v11p_sd15_normalbae\",\n \"sd21\": \"thibaud/controlnet-sd21-normalbae-diffusers\",\n \"sdxl\": \"diffusers/controlnet-depth-sdxl-1.0\",\n \"sdxl_fast\": \"diffusers/controlnet-depth-sdxl-1.0-small\",\n \"sd15_depth\": \"lllyasviel/control_v11f1p_sd15_depth\",\n}" }, { "identifier": "VAE_MODELS", "path": "relighting/argument.py", "snippet": "VAE_MODELS = {\n \"sdxl\": \"madebyollin/sdxl-vae-fp16-fix\",\n \"sdxl_fast\": \"madebyollin/sdxl-vae-fp16-fix\",\n}" } ]
import torch import argparse import numpy as np import torch.distributed as dist import os import json import relighting.dist_utils as dist_util import time from PIL import Image from tqdm.auto import tqdm from relighting.inpainter import BallInpainter from relighting.mask_utils import MaskGenerator from relighting.ball_processor import ( get_ideal_normal_ball, crop_ball ) from relighting.dataset import GeneralLoader from relighting.utils import name2hash from relighting.argument import ( SD_MODELS, CONTROLNET_MODELS, VAE_MODELS )
7760
# get list of all EVs ev_list = [float(x) for x in args.ev.split(",")] interpolants = [ev / args.max_negative_ev for ev in ev_list] print("EV : ", ev_list) print("EV : ", interpolants) # calculate prompt embeddings prompt_normal = args.prompt prompt_dark = args.prompt_dark prompt_embeds_normal, _, pooled_prompt_embeds_normal, _ = pipe.pipeline.encode_prompt(prompt_normal) prompt_embeds_dark, _, pooled_prompt_embeds_dark, _ = pipe.pipeline.encode_prompt(prompt_dark) # interpolate embeddings interpolate_embeds = [] for t in interpolants: int_prompt_embeds = prompt_embeds_normal + t * (prompt_embeds_dark - prompt_embeds_normal) int_pooled_prompt_embeds = pooled_prompt_embeds_normal + t * (pooled_prompt_embeds_dark - pooled_prompt_embeds_normal) interpolate_embeds.append((int_prompt_embeds, int_pooled_prompt_embeds)) return dict(zip(ev_list, interpolate_embeds)) def main(): # load arguments args = create_argparser().parse_args() # get local rank if args.is_cpu: device = torch.device("cpu") torch_dtype = torch.float32 else: device = dist_util.dev() torch_dtype = torch.float16 # so, we need ball_dilate >= 16 (2*vae_scale_factor) to make our mask shape = (272, 272) assert args.ball_dilate % 2 == 0 # ball dilation should be symmetric # create controlnet pipeline if args.model_option in ["sdxl", "sdxl_fast"] and args.use_controlnet: model, controlnet = SD_MODELS[args.model_option], CONTROLNET_MODELS[args.model_option] pipe = BallInpainter.from_sdxl( model=model, controlnet=controlnet, device=device, torch_dtype = torch_dtype, offload = args.offload ) elif args.model_option in ["sdxl", "sdxl_fast"] and not args.use_controlnet: model = SD_MODELS[args.model_option] pipe = BallInpainter.from_sdxl( model=model, controlnet=None, device=device, torch_dtype = torch_dtype, offload = args.offload ) elif args.use_controlnet: model, controlnet = SD_MODELS[args.model_option], CONTROLNET_MODELS[args.model_option] pipe = BallInpainter.from_sd( model=model, controlnet=controlnet, device=device, torch_dtype = torch_dtype, offload = args.offload ) else: model = SD_MODELS[args.model_option] pipe = BallInpainter.from_sd( model=model, controlnet=None, device=device, torch_dtype = torch_dtype, offload = args.offload ) if args.lora_scale > 0 and args.lora_path is None: raise ValueError("lora scale is not 0 but lora path is not set") if (args.lora_path is not None) and (args.use_lora): print(f"using lora path {args.lora_path}") print(f"using lora scale {args.lora_scale}") pipe.pipeline.load_lora_weights(args.lora_path) pipe.pipeline.fuse_lora(lora_scale=args.lora_scale) # fuse lora weight w' = w + \alpha \Delta w enabled_lora = True else: enabled_lora = False if args.use_torch_compile: try: print("compiling unet model") start_time = time.time() pipe.pipeline.unet = torch.compile(pipe.pipeline.unet, mode="reduce-overhead", fullgraph=True) print("Model compilation time: ", time.time() - start_time) except: pass # default height for sdxl is 1024, if not set, we set default height. if args.model_option == "sdxl" and args.img_height == 0 and args.img_width == 0: args.img_height = 1024 args.img_width = 1024 # load dataset dataset = GeneralLoader( root=args.dataset, resolution=(args.img_width, args.img_height), force_square=args.force_square, return_dict=True, random_shuffle=args.random_loader, process_id=args.idx, process_total=args.total, limit_input=args.limit_input, ) # interpolate embedding embedding_dict = interpolate_embedding(pipe, args) # prepare mask and normal ball
# inpaint the ball on an image # this one is design for general image that does not require special location to place # cross import from inpaint_multi-illum.py def create_argparser(): parser = argparse.ArgumentParser() parser.add_argument("--dataset", type=str, required=True ,help='directory that contain the image') #dataset name or directory parser.add_argument("--ball_size", type=int, default=256, help="size of the ball in pixel") parser.add_argument("--ball_dilate", type=int, default=20, help="How much pixel to dilate the ball to make a sharper edge") parser.add_argument("--prompt", type=str, default="a perfect mirrored reflective chrome ball sphere") parser.add_argument("--prompt_dark", type=str, default="a perfect black dark mirrored reflective chrome ball sphere") parser.add_argument("--negative_prompt", type=str, default="matte, diffuse, flat, dull") parser.add_argument("--model_option", default="sdxl", help='selecting fancy model option (sd15_old, sd15_new, sd21, sdxl)') # [sd15_old, sd15_new, or sd21] parser.add_argument("--output_dir", required=True, type=str, help="output directory") parser.add_argument("--img_height", type=int, default=1024, help="Dataset Image Height") parser.add_argument("--img_width", type=int, default=1024, help="Dataset Image Width") # some good seed 0, 37, 71, 125, 140, 196, 307, 434, 485, 575 | 9021, 9166, 9560, 9814, but default auto is for fairness parser.add_argument("--seed", default="auto", type=str, help="Seed: right now we use single seed instead to reduce the time, (Auto will use hash file name to generate seed)") parser.add_argument("--denoising_step", default=30, type=int, help="number of denoising step of diffusion model") parser.add_argument("--control_scale", default=0.5, type=float, help="controlnet conditioning scale") parser.add_argument('--no_controlnet', dest='use_controlnet', action='store_false', help='by default we using controlnet, we have option to disable to see the different') parser.set_defaults(use_controlnet=True) parser.add_argument('--no_force_square', dest='force_square', action='store_false', help='SDXL is trained for square image, we prefered the square input. but you use this option to disable reshape') parser.set_defaults(force_square=True) parser.add_argument('--no_random_loader', dest='random_loader', action='store_false', help="by default, we random how dataset load. This make us able to peak into the trend of result without waiting entire dataset. 
but can disable if prefereed") parser.set_defaults(random_loader=True) parser.add_argument('--cpu', dest='is_cpu', action='store_true', help="using CPU inference instead of GPU inference") parser.set_defaults(is_cpu=False) parser.add_argument('--offload', dest='offload', action='store_false', help="to enable diffusers cpu offload") parser.set_defaults(offload=False) parser.add_argument("--limit_input", default=0, type=int, help="limit number of image to process to n image (0 = no limit), useful for run smallset") # LoRA stuff parser.add_argument('--no_lora', dest='use_lora', action='store_false', help='by default we using lora, we have option to disable to see the different') parser.set_defaults(use_lora=True) parser.add_argument("--lora_path", default="models/ThisIsTheFinal-lora-hdr-continuous-largeT@900/0_-5/checkpoint-2500", type=str, help="LoRA Checkpoint path") parser.add_argument("--lora_scale", default=0.75, type=float, help="LoRA scale factor") # speed optimization stuff parser.add_argument('--no_torch_compile', dest='use_torch_compile', action='store_false', help='by default we using torch compile for faster processing speed. disable it if your environemnt is lower than pytorch2.0') parser.set_defaults(use_torch_compile=True) # algorithm + iterative stuff parser.add_argument("--algorithm", type=str, default="iterative", choices=["iterative", "normal"], help="Selecting between iterative or normal (single pass inpaint) algorithm") parser.add_argument("--agg_mode", default="median", type=str) parser.add_argument("--strength", default=0.8, type=float) parser.add_argument("--num_iteration", default=2, type=int) parser.add_argument("--ball_per_iteration", default=30, type=int) parser.add_argument('--no_save_intermediate', dest='save_intermediate', action='store_false') parser.set_defaults(save_intermediate=True) parser.add_argument("--cache_dir", default="./temp_inpaint_iterative", type=str, help="cache directory for iterative inpaint") # pararelle processing parser.add_argument("--idx", default=0, type=int, help="index of the current process, useful for running on multiple node") parser.add_argument("--total", default=1, type=int, help="total number of process") # for HDR stuff parser.add_argument("--max_negative_ev", default=-5, type=int, help="maximum negative EV for lora") parser.add_argument("--ev", default="0,-2.5,-5", type=str, help="EV: list of EV to generate") return parser def get_ball_location(image_data, args): if 'boundary' in image_data: # support predefined boundary if need x = image_data["boundary"]["x"] y = image_data["boundary"]["y"] r = image_data["boundary"]["size"] # support ball dilation half_dilate = args.ball_dilate // 2 # check if not left out-of-bound if x - half_dilate < 0: x += half_dilate if y - half_dilate < 0: y += half_dilate # check if not right out-of-bound if x + r + half_dilate > args.img_width: x -= half_dilate if y + r + half_dilate > args.img_height: y -= half_dilate else: # we use top-left corner notation x, y, r = ((args.img_width // 2) - (args.ball_size // 2), (args.img_height // 2) - (args.ball_size // 2), args.ball_size) return x, y, r def interpolate_embedding(pipe, args): print("interpolate embedding...") # get list of all EVs ev_list = [float(x) for x in args.ev.split(",")] interpolants = [ev / args.max_negative_ev for ev in ev_list] print("EV : ", ev_list) print("EV : ", interpolants) # calculate prompt embeddings prompt_normal = args.prompt prompt_dark = args.prompt_dark prompt_embeds_normal, _, pooled_prompt_embeds_normal, _ = 
pipe.pipeline.encode_prompt(prompt_normal) prompt_embeds_dark, _, pooled_prompt_embeds_dark, _ = pipe.pipeline.encode_prompt(prompt_dark) # interpolate embeddings interpolate_embeds = [] for t in interpolants: int_prompt_embeds = prompt_embeds_normal + t * (prompt_embeds_dark - prompt_embeds_normal) int_pooled_prompt_embeds = pooled_prompt_embeds_normal + t * (pooled_prompt_embeds_dark - pooled_prompt_embeds_normal) interpolate_embeds.append((int_prompt_embeds, int_pooled_prompt_embeds)) return dict(zip(ev_list, interpolate_embeds)) def main(): # load arguments args = create_argparser().parse_args() # get local rank if args.is_cpu: device = torch.device("cpu") torch_dtype = torch.float32 else: device = dist_util.dev() torch_dtype = torch.float16 # so, we need ball_dilate >= 16 (2*vae_scale_factor) to make our mask shape = (272, 272) assert args.ball_dilate % 2 == 0 # ball dilation should be symmetric # create controlnet pipeline if args.model_option in ["sdxl", "sdxl_fast"] and args.use_controlnet: model, controlnet = SD_MODELS[args.model_option], CONTROLNET_MODELS[args.model_option] pipe = BallInpainter.from_sdxl( model=model, controlnet=controlnet, device=device, torch_dtype = torch_dtype, offload = args.offload ) elif args.model_option in ["sdxl", "sdxl_fast"] and not args.use_controlnet: model = SD_MODELS[args.model_option] pipe = BallInpainter.from_sdxl( model=model, controlnet=None, device=device, torch_dtype = torch_dtype, offload = args.offload ) elif args.use_controlnet: model, controlnet = SD_MODELS[args.model_option], CONTROLNET_MODELS[args.model_option] pipe = BallInpainter.from_sd( model=model, controlnet=controlnet, device=device, torch_dtype = torch_dtype, offload = args.offload ) else: model = SD_MODELS[args.model_option] pipe = BallInpainter.from_sd( model=model, controlnet=None, device=device, torch_dtype = torch_dtype, offload = args.offload ) if args.lora_scale > 0 and args.lora_path is None: raise ValueError("lora scale is not 0 but lora path is not set") if (args.lora_path is not None) and (args.use_lora): print(f"using lora path {args.lora_path}") print(f"using lora scale {args.lora_scale}") pipe.pipeline.load_lora_weights(args.lora_path) pipe.pipeline.fuse_lora(lora_scale=args.lora_scale) # fuse lora weight w' = w + \alpha \Delta w enabled_lora = True else: enabled_lora = False if args.use_torch_compile: try: print("compiling unet model") start_time = time.time() pipe.pipeline.unet = torch.compile(pipe.pipeline.unet, mode="reduce-overhead", fullgraph=True) print("Model compilation time: ", time.time() - start_time) except: pass # default height for sdxl is 1024, if not set, we set default height. if args.model_option == "sdxl" and args.img_height == 0 and args.img_width == 0: args.img_height = 1024 args.img_width = 1024 # load dataset dataset = GeneralLoader( root=args.dataset, resolution=(args.img_width, args.img_height), force_square=args.force_square, return_dict=True, random_shuffle=args.random_loader, process_id=args.idx, process_total=args.total, limit_input=args.limit_input, ) # interpolate embedding embedding_dict = interpolate_embedding(pipe, args) # prepare mask and normal ball
mask_generator = MaskGenerator()
1
2023-12-07 14:03:31+00:00
12k
eliphatfs/zerorf
zerorf.py
[ { "identifier": "MultiSceneNeRF", "path": "lib/models/autoencoders/multiscene_nerf.py", "snippet": "class MultiSceneNeRF(BaseNeRF):\n\n def __init__(self,\n *args,\n cache_size=0, # cache in RAM, top priority\n cache_16bit=False,\n num_file_writers=0, # cache in file system (for large dataset)\n **kwargs):\n super().__init__(*args, **kwargs)\n\n self.cache_size = cache_size\n self.cache_16bit = cache_16bit\n if cache_size > 0:\n rank, ws = get_dist_info()\n split_points = np.round(np.linspace(0, cache_size, num=ws + 1)).astype(np.int64)\n inds = np.arange(start=split_points[rank], stop=split_points[rank + 1])\n self.cache = {ind: None for ind in inds}\n else:\n self.cache = None\n self.cache_loaded = False\n\n self.num_file_writers = num_file_writers\n self.is_file_writers_initialized = False\n\n def init_file_writers(self, save_dir):\n if self.num_file_writers > 0:\n def file_writer(queue):\n while True:\n obj = queue.get()\n torch.save(obj, os.path.join(save_dir, obj['scene_name'] + '.pth'))\n\n self.file_queues = [mp.Queue(maxsize=1) for _ in range(self.num_file_writers)]\n for queue in self.file_queues:\n p = mp.Process(target=file_writer, args=(queue,))\n p.start()\n else:\n self.file_queues = None\n self.is_file_writers_initialized = True\n\n def load_cache(self, data, freeze_code=False):\n device = get_module_device(self)\n num_scenes = len(data['scene_id'])\n rank, ws = get_dist_info()\n\n if self.cache is not None:\n if not self.cache_loaded:\n cache_load_from = self.train_cfg.get('cache_load_from', None)\n loaded = False\n if cache_load_from is not None:\n cache_files = os.listdir(cache_load_from)\n cache_files.sort()\n if len(cache_files) > 0:\n assert len(cache_files) == self.cache_size\n cacheiter = list(self.cache.keys())\n if sys.stdout.isatty() and rank == 0:\n cacheiter = tqdm.tqdm(cacheiter)\n for ind in cacheiter:\n self.cache[ind] = torch.load(\n os.path.join(cache_load_from, cache_files[ind]), map_location='cpu')\n loaded = True\n if rank == 0:\n mmcv.print_log('Loaded cache files from ' + cache_load_from + '.', 'mmgen')\n if not loaded:\n if rank == 0:\n mmcv.print_log('Initialize codes from scratch.', 'mmgen')\n self.cache_loaded = True\n cache_list = [self.cache[scene_id_single] for scene_id_single in data['scene_id']]\n elif 'code' in data:\n cache_list = data['code']\n else:\n cache_list = [None for _ in range(num_scenes)]\n code_list_ = []\n density_grid = []\n density_bitfield = []\n for scene_state_single in cache_list:\n if scene_state_single is None:\n code_list_.append(self.get_init_code_(None, device))\n density_grid.append(self.get_init_density_grid(None, device))\n density_bitfield.append(self.get_init_density_bitfield(None, device))\n else:\n if 'code_' in scene_state_single['param']:\n code_ = scene_state_single['param']['code_'].to(dtype=torch.float32, device=device)\n else:\n assert 'code' in scene_state_single['param']\n if rank == 0:\n warnings.warn(\n 'Pre-activation codes not found. 
Using on-the-fly inversion instead '\n '(which could be inconsistent).')\n code_ = self.code_activation.inverse(\n scene_state_single['param']['code'].to(dtype=torch.float32, device=device))\n code_list_.append(code_.requires_grad_(not freeze_code))\n density_grid.append(\n scene_state_single['param']['density_grid'].to(device)\n if 'density_grid' in scene_state_single['param']\n else self.get_init_density_grid(None, device))\n density_bitfield.append(\n scene_state_single['param']['density_bitfield'].to(device)\n if 'density_bitfield' in scene_state_single['param']\n else self.get_init_density_bitfield(None, device))\n density_grid = torch.stack(density_grid, dim=0)\n density_bitfield = torch.stack(density_bitfield, dim=0)\n\n code_optimizers = self.build_optimizer(code_list_, self.train_cfg)\n for ind, scene_state_single in enumerate(cache_list):\n if scene_state_single is not None and 'optimizer' in scene_state_single:\n optimizer_set_state(code_optimizers[ind], scene_state_single['optimizer'])\n return code_list_, code_optimizers, density_grid, density_bitfield\n\n def save_cache(self, code_list_, code_optimizers,\n density_grid, density_bitfield, scene_id, scene_name):\n if self.cache_16bit:\n code_dtype = torch.float16 if code_list_[0].dtype == torch.float32 else code_list_[0].dtype\n optimizer_dtype = torch.bfloat16\n else:\n code_dtype = code_list_[0].dtype\n optimizer_dtype = torch.float32\n if 'save_dir' in self.train_cfg:\n save_dir = self.train_cfg['save_dir']\n os.makedirs(save_dir, exist_ok=True)\n if not self.is_file_writers_initialized:\n self.init_file_writers(save_dir)\n else:\n save_dir = None\n for ind, code_single_ in enumerate(code_list_):\n scene_id_single = scene_id[ind]\n out = dict(\n scene_id=scene_id_single,\n scene_name=scene_name[ind],\n param=dict(\n code_=code_single_.data,\n density_grid=density_grid[ind],\n density_bitfield=density_bitfield[ind]),\n optimizer=code_optimizers[ind].state_dict())\n if self.cache is not None:\n if self.cache[scene_id_single] is None:\n self.cache[scene_id_single] = out_dict_to(\n out, device='cpu', code_dtype=code_dtype, optimizer_dtype=optimizer_dtype)\n else:\n if 'scene_id' not in self.cache[scene_id_single]:\n self.cache[scene_id_single]['scene_id'] = out['scene_id']\n if 'scene_name' not in self.cache[scene_id_single]:\n self.cache[scene_id_single]['scene_name'] = out['scene_name']\n if 'code' in self.cache[scene_id_single]['param']:\n del self.cache[scene_id_single]['param']['code']\n for key, val in out['param'].items():\n load_tensor_to_dict(self.cache[scene_id_single]['param'], key, val,\n device='cpu', dtype=code_dtype)\n if 'optimizer' in self.cache[scene_id_single]:\n optimizer_state_copy(out['optimizer'], self.cache[scene_id_single]['optimizer'],\n device='cpu', dtype=optimizer_dtype)\n else:\n self.cache[scene_id_single]['optimizer'] = optimizer_state_to(\n out['optimizer'], device='cpu', dtype=optimizer_dtype)\n if save_dir is not None:\n if self.file_queues is not None:\n self.file_queues[ind // self.num_file_writers].put(\n out_dict_to(out, device='cpu', code_dtype=code_dtype, optimizer_dtype=optimizer_dtype))\n else:\n torch.save(\n out_dict_to(out, device='cpu', code_dtype=code_dtype, optimizer_dtype=optimizer_dtype),\n os.path.join(save_dir, scene_name + '.pth'))\n\n def train_step(self, data, optimizer, running_status=None):\n code_list_, code_optimizers, density_grid, density_bitfield = self.load_cache(data)\n\n # ==== optimize code ====\n cond_imgs = data['cond_imgs'] # (num_scenes, num_imgs, h, w, 3)\n 
cond_intrinsics = data['cond_intrinsics'] # (num_scenes, num_imgs, 4), in [fx, fy, cx, cy]\n cond_poses = data['cond_poses']\n cond_times = data.get('cond_times')\n\n num_scenes, num_imgs, h, w, _ = cond_imgs.size()\n # (num_scenes, num_imgs, h, w, 3)\n cond_rays_o, cond_rays_d = get_cam_rays(cond_poses, cond_intrinsics, h, w)\n dt_gamma_scale = self.train_cfg.get('dt_gamma_scale', 0.0)\n # (num_scenes,)\n dt_gamma = dt_gamma_scale / cond_intrinsics[..., :2].mean(dim=(-2, -1))\n\n extra_scene_step = self.train_cfg.get('extra_scene_step', 0)\n if extra_scene_step > 0:\n cfg = self.train_cfg.copy()\n cfg['n_inverse_steps'] = extra_scene_step\n self.inverse_code(\n self.decoder, cond_imgs, cond_rays_o, cond_rays_d, dt_gamma=dt_gamma, cfg=cfg,\n code_=code_list_,\n density_grid=density_grid,\n density_bitfield=density_bitfield,\n code_optimizer=code_optimizers)\n\n # ==== joint optimization ====\n for code_optimizer in code_optimizers:\n code_optimizer.zero_grad()\n optimizer['decoder'].zero_grad()\n\n code = self.code_activation(torch.stack(code_list_, dim=0), update_stats=True)\n\n loss, log_vars, out_rgbs, target_rgbs = self.loss_decoder(\n self.decoder, code, density_bitfield, cond_rays_o, cond_rays_d,\n cond_imgs, dt_gamma=dt_gamma, cond_times=cond_times, cfg=self.train_cfg,\n update_extra_state=self.update_extra_iters,\n extra_args=(density_grid, density_bitfield, 0),\n extra_kwargs=dict(\n density_thresh=self.train_cfg['density_thresh']\n ) if 'density_thresh' in self.train_cfg else dict())\n loss.backward()\n log_vars.update(loss=float(loss))\n\n if self.train_cfg.get('decoder_grad_clip', 0.0) > 0.0:\n decoder_grad_norm = torch.nn.utils.clip_grad_norm_(\n self.decoder.parameters(), self.train_cfg['decoder_grad_clip'])\n log_vars.update(decoder_grad_norm=float(decoder_grad_norm))\n optimizer['decoder'].step()\n for code_optimizer in code_optimizers:\n code_optimizer.step()\n\n # ==== save cache ====\n self.save_cache(\n code_list_, code_optimizers,\n density_grid, density_bitfield, data['scene_id'], data['scene_name'])\n\n # ==== evaluate reconstruction ====\n with torch.no_grad():\n self.mean_ema_update(code)\n train_psnr = eval_psnr(out_rgbs, target_rgbs)\n code_rms = code.square().flatten(1).mean().sqrt()\n log_vars.update(train_psnr=float(train_psnr.mean()),\n code_rms=float(code_rms.mean()))\n if 'test_imgs' in data and data['test_imgs'] is not None:\n log_vars.update(self.eval_and_viz(\n data, self.decoder, code, density_bitfield, cfg=self.train_cfg))\n\n # ==== outputs ====\n outputs_dict = dict(\n log_vars=log_vars, num_samples=num_scenes)\n\n return outputs_dict" }, { "identifier": "build_optimizers", "path": "lib/core/optimizer/builder.py", "snippet": "def build_optimizers(model, cfgs):\n \"\"\"Modified from MMGeneration\n \"\"\"\n optimizers = {}\n if hasattr(model, 'module'):\n model = model.module\n # determine whether 'cfgs' has several dicts for optimizers\n is_dict_of_dict = True\n for key, cfg in cfgs.items():\n if not isinstance(cfg, dict):\n is_dict_of_dict = False\n if is_dict_of_dict:\n for key, cfg in cfgs.items():\n cfg_ = cfg.copy()\n module = rgetattr(model, key)\n optimizers[key] = build_optimizer(module, cfg_)\n return optimizers\n\n return build_optimizer(model, cfgs)" }, { "identifier": "OrbitCamera", "path": "lib/core/ssdnerf_gui.py", "snippet": "class OrbitCamera:\n def __init__(self, name, W, H, r=2., fovy=60., euler=[0, 0, 0]):\n self.name = name\n self.W = W\n self.H = H\n self.radius = r # camera distance from center\n self.fovy = fovy # in degree\n 
self.center = np.array([0, 0, 0], dtype=np.float32) # look at this point\n self.default_rot = R.from_quat([0.5, -0.5, 0.5, -0.5])\n self.rot = copy.deepcopy(self.default_rot)\n self.up = np.array([0, 0, 1], dtype=np.float32) # need to be normalized!\n\n self.set_euler(euler)\n\n # pose\n @property\n def pose(self):\n # first move camera to radius\n res = np.eye(4, dtype=np.float32)\n res[2, 3] -= self.radius\n # rotate\n rot = np.eye(4, dtype=np.float32)\n rot[:3, :3] = self.rot.as_matrix()\n res = rot @ res\n # translate\n res[:3, 3] -= self.center\n return res\n\n def set_pose(self, pose):\n self.rot = R.from_matrix(pose[:3, :3])\n self.center = -pose[:3, 3] - self.rot.as_matrix()[:3, 2] * self.radius\n\n @property\n def intrinsics(self):\n focal = self.H / (2 * np.tan(np.radians(self.fovy) / 2))\n return np.array([focal, focal, self.W / 2, self.H / 2])\n\n @property\n def euler(self):\n return (self.rot * self.default_rot.inv()).as_euler('xyz', degrees=True)\n\n def set_euler(self, euler):\n self.rot = R.from_euler('xyz', euler, degrees=True) * self.default_rot\n\n def orbit(self, dx, dy):\n # rotate along camera up/side axis!\n side = self.rot.as_matrix()[:3, 0] # why this is side --> ? # already normalized.\n rotvec_x = self.up * np.radians(-0.1 * dx)\n rotvec_y = side * np.radians(-0.1 * dy)\n self.rot = R.from_rotvec(rotvec_x) * R.from_rotvec(rotvec_y) * self.rot\n\n def scale(self, delta):\n self.radius *= 1.1 ** (-delta)\n\n def pan(self, dx, dy, dz=0):\n # pan in camera coordinate system (careful on the sensitivity!)\n self.center += 0.0005 * self.rot.as_matrix()[:3, :3] @ np.array([dx, dy, dz])\n\n def pose2str(self):\n with np.printoptions(precision=3, suppress=True):\n return str(self.pose)" }, { "identifier": "NerfSynthetic", "path": "lib/datasets/nerf_synthetic.py", "snippet": "class NerfSynthetic(Dataset):\n\n def __init__(\n self, meta_files: list, world_scale: float = 1.0, rgba: bool = False\n ) -> None:\n super().__init__()\n self.meta_files = meta_files\n self.world_scale = world_scale\n self.rgba = rgba\n\n def __len__(self):\n return len(self.meta_files)\n\n def load_sub(self, sub):\n with open(sub) as mf:\n meta = json.load(mf)\n frames_i = []\n frames_p = []\n frames_c = []\n frames_t = []\n for frame in range(len(meta['frames'])):\n img = plotlib.imread(os.path.join(os.path.dirname(sub), meta['frames'][frame]['file_path'] + '.png'))\n h, w, c = img.shape\n x, y = w / 2, h / 2\n focal_length = y / numpy.tan(meta['camera_angle_x'] / 2)\n # scaling = 320.0 / img.shape[0]\n scaling = 1.0\n if not self.rgba:\n img = img[..., :3] * img[..., 3:] + (1 - img[..., 3:])\n # img = cv2.resize(img, [320, 320], interpolation=cv2.INTER_AREA)\n pose = meta['frames'][frame]['transform_matrix']\n frames_i.append(img)\n frames_p.append((numpy.array(pose) @ BLENDER_TO_OPENCV_MATRIX) * self.world_scale)\n frames_c.append(numpy.array([focal_length, focal_length, x, y]) * scaling)\n if 'time' in meta['frames'][frame]:\n frames_t.append(meta['frames'][frame]['time'])\n f32 = numpy.float32\n return dict(\n cond_imgs=numpy.array(frames_i, f32),\n cond_poses=numpy.array(frames_p, f32),\n cond_intrinsics=numpy.array(frames_c, f32),\n cond_times=numpy.array(frames_t, f32) * 2 - 1 if len(frames_t) else None\n )\n\n def __getitem__(self, index):\n sub = self.meta_files[index]\n return dict(\n scene_id=DC(index, cpu_only=True),\n scene_name=DC(sub, cpu_only=True),\n **self.load_sub(sub)\n )" }, { "identifier": "OppoDataset", "path": "lib/datasets/oppo.py", "snippet": "class 
OppoDataset(Dataset):\n\n def __init__(\n self, root_dir: str, split: str, world_scale: float = 1.0, rgba: bool = False\n ) -> None:\n super().__init__()\n self.root_dir = root_dir\n self.world_scale = world_scale\n self.rgba = rgba\n self.split = split\n\n self.downsample = 4.0\n self.img_wh = (int(2656 / self.downsample), int(3984 / self.downsample))\n self.define_transforms()\n\n # self.scene_bbox = torch.tensor([[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]])\n # self.near_far = [0.5, 1.5]\n\n camera_file = os.path.join(self.root_dir, f\"../../transforms_alignz_{split}.json\")\n with open(camera_file, 'r') as f:\n self.meta = json.load(f)['frames']\n\n self.poses = []\n self.imgs = []\n self.intrinsic = []\n w, h = self.img_wh\n\n for k, v in self.meta.items():\n imgid = v['file_path'].split('/')[-1]\n\n focal = 0.5 * v['calib_imgw'] / np.tan(0.5 * v['camera_angle_x']) # original focal length\n if self.downsample != 1.0:\n focal = focal / self.downsample\n\n image_path = os.path.join(self.root_dir, f\"../Lights/013/raw_undistorted/{imgid}.JPG\")\n c2w = np.array(v['transform_matrix'])\n c2w = torch.FloatTensor(c2w)\n self.poses.append(c2w)\n\n self.intrinsic.append(torch.tensor([focal, focal, w / 2, h / 2])) # focal, focal, cx, cy\n\n img = Image.open(image_path)\n\n if self.downsample != 1.0:\n img = img.resize(self.img_wh, Image.LANCZOS)\n img = self.transform(img) # (4, h, w)\n if self.split == 'train':\n mask_path = os.path.join(self.root_dir, f\"com_masks/{imgid}.png\")\n else:\n # mask_path = os.path.join(self.root_dir, f\"obj_masks/{imgid}.png\")\n mask_path = os.path.join(self.root_dir, f\"com_masks/{imgid}.png\")\n mask = cv2.imread(mask_path, 2) > 0\n if self.downsample != 1.0:\n mask = cv2.resize(mask.astype(np.uint8), self.img_wh) > 0\n mask = torch.from_numpy(mask).bool()\n img = img.permute(1,2,0)\n img = img * mask[...,None].float() + (1 - mask[...,None].float()) # blend A to RGB\n if rgba:\n img = torch.cat([img, mask[..., None]], dim=-1)\n self.imgs += [img]\n\n self.poses = torch.stack(self.poses, dim=0) * self.world_scale\n # self.poses = transform_poses_pca(np.array(self.poses))\n self.imgs = torch.stack(self.imgs, dim=0)\n self.intrinsic = torch.stack(self.intrinsic, dim=0)\n\n def define_transforms(self):\n self.transform = T.ToTensor()\n\n def __len__(self):\n return 1\n\n def __getitem__(self, index):\n return dict(\n scene_id=DC(index, cpu_only=True),\n scene_name=DC(self.root_dir, cpu_only=True),\n cond_imgs=np.array(self.imgs, np.float32),\n cond_poses=np.array(self.poses, np.float32),\n cond_intrinsics=np.array(self.intrinsic, np.float32)\n )" }, { "identifier": "config_parser", "path": "opt.py", "snippet": "def config_parser(cmd=None):\n parser = configargparse.ArgumentParser()\n # experiment\n parser.add_argument('--load-image', type=str, default=None,\n help='zero123pp image path')\n parser.add_argument(\"--proj-name\", type=str, default=\"test\",\n help='experiment name')\n parser.add_argument(\"--wandb-project\", type=str, \n default=\"zerorf\", help='wandb project name')\n \n # data\n parser.add_argument(\"--dataset\", type=str, \n default=\"nerf_syn\", help='type of dataset')\n parser.add_argument(\"--data-dir\", type=str, \n default=\"/root/nerf_synthetic\", help='directory of the dataset')\n parser.add_argument(\"--obj\", type=str, \n default=\"chair\", help='object name')\n parser.add_argument(\"--n-views\", type=int, \n default=6, help='number of input views')\n \n # model\n parser.add_argument(\"--model-res\", type=int, \n default=20, help='noise 
resolution (should be about 1/40 the provided image resolution), ignored when load-image is set')\n parser.add_argument(\"--model-ch\", type=int, \n default=8, help='noise channel')\n parser.add_argument(\"--n-rays-init\", type=int, \n default=2**12, help='number of rays per batch initially')\n parser.add_argument(\"--n-rays-up\", type=int, \n default=2**16, help='number of rays per batch after 100 iterations')\n parser.add_argument(\"--learn-bg\", action='store_true', help='if learn background')\n parser.add_argument(\"--bg-color\", type=float, \n default=1.0, help='background color')\n parser.add_argument(\"--rep\", type=str, choices=['dif', 'tensorf'],\n default=\"dif\", help=\"representation to use\")\n \n # training\n parser.add_argument(\"--net-lr\", type=float, \n default=0.002, help='learning rate')\n parser.add_argument(\"--seed\", type=int, \n default=1337, help='random seed')\n parser.add_argument(\"--n-val\", type=int, \n default=1, help='number of validate views')\n parser.add_argument(\"--net-lr-decay-to\", type=float, \n default=0.002, help='lr decay rate')\n parser.add_argument(\"--n-iters\", type=int, \n default=10000, help='number of iterations')\n parser.add_argument(\"--val-iter\", type=int, \n default=1000, help='valid every k iterations')\n parser.add_argument(\"--device\", type=str, \n default=\"cuda:0\", help='device name')\n \n if cmd is not None:\n return parser.parse_args(cmd)\n else:\n return parser.parse_args()" } ]
import sys
import shutil
import os
import cv2
import tqdm
import json
import numpy
import wandb
import torch
import torch_redstone as rst
import einops
from sklearn.cluster import KMeans
from lib.models.autoencoders import MultiSceneNeRF
from mmgen.models import build_model, build_module
from lib.core.optimizer import build_optimizers
from lib.core.ssdnerf_gui import OrbitCamera
from lib.datasets.nerf_synthetic import NerfSynthetic
from lib.datasets.oppo import OppoDataset
from PIL import Image
from opt import config_parser
from pprint import pprint
7,522
    _, b, h, w, c = images.shape
    x, y = w / 2, h / 2
    focal_length = y / numpy.tan(meta['fovy'] / 2)
    intrinsics = numpy.array([[focal_length, focal_length, x, y]] * args.n_views)

work_dir = "results/%s" % args.proj_name
os.makedirs(work_dir, exist_ok=True)
os.chdir(work_dir)

if not args.load_image:
    if args.dataset == "nerf_syn":
        model_scale = dict(chair=2.1, drums=2.3, ficus=2.3, hotdog=3.0, lego=2.4, materials=2.4, mic=2.5, ship=2.75)
        world_scale = 2 / model_scale[args.obj]
        dataset = NerfSynthetic([f"{args.data_dir}/{args.obj}/transforms_train.json"], rgba=True, world_scale=world_scale)
        val = NerfSynthetic([f"{args.data_dir}/{args.obj}/transforms_val.json"], world_scale=world_scale)
        test = NerfSynthetic([f"{args.data_dir}/{args.obj}/transforms_test.json"], world_scale=world_scale)
        entry = dataset[0]
        selected_idxs = kmeans_downsample(entry['cond_poses'][..., :3, 3], args.n_views)
    elif args.dataset == "oi":
        world_scale = 5.0
        dataset = OppoDataset(f"{args.data_dir}/{args.obj}/output", split='train', world_scale=world_scale, rgba=True)
        val = OppoDataset(f"{args.data_dir}/{args.obj}/output", split='test', world_scale=world_scale)
        test = OppoDataset(f"{args.data_dir}/{args.obj}/output", split='test', world_scale=world_scale)
        entry = dataset[0]
        if args.n_views == 6:
            selected_idxs = [10, 3, 19, 22, 17, 35]
        elif args.n_views == 4:
            selected_idxs = [10, 33, 35, 6]
        else:
            selected_idxs = kmeans_downsample(entry['cond_poses'][..., :3, 3], args.n_views)
    data_entry = dict(
        cond_imgs=torch.tensor(entry['cond_imgs'][selected_idxs][None]).float().to(device),
        cond_poses=torch.tensor(entry['cond_poses'])[selected_idxs][None].float().to(device),
        cond_intrinsics=torch.tensor(entry['cond_intrinsics'])[selected_idxs][None].float().to(device),
        scene_id=[0],
        scene_name=[args.proj_name]
    )
    entry = val[0]
    val_entry = dict(
        test_imgs=torch.tensor(entry['cond_imgs'][:args.n_val][None]).float().to(device),
        test_poses=torch.tensor(entry['cond_poses'][:args.n_val])[None].float().to(device),
        test_intrinsics=torch.tensor(entry['cond_intrinsics'][:args.n_val])[None].float().to(device),
        scene_id=[0],
        scene_name=[args.proj_name]
    )
    entry = test[0]
    test_entry = dict(
        test_imgs=torch.tensor(entry['cond_imgs'][:][None]).float().to(device),
        test_poses=torch.tensor(entry['cond_poses'][:])[None].float().to(device),
        test_intrinsics=torch.tensor(entry['cond_intrinsics'][:])[None].float().to(device),
        scene_id=[0],
        scene_name=[args.proj_name]
    )
else:
    data_entry = dict(
        cond_imgs=images,
        cond_poses=torch.tensor(poses)[None].float().to(device) * 0.9,
        cond_intrinsics=torch.tensor(intrinsics)[None].float().to(device),
        scene_id=[0],
        scene_name=[args.proj_name]
    )
    selected_idxs = list(range(args.n_views))

pic_h = data_entry['cond_imgs'].shape[-3]
pic_w = data_entry['cond_imgs'].shape[-2]
if args.load_image:
    args.model_res = 4
    pic_h = pic_w = 320
cam = OrbitCamera('render', pic_w, pic_h, 3.2, 48)

decoder_1 = dict(
    type='TensorialDecoder',
    preprocessor=dict(
        type='TensorialGenerator',
        in_ch=args.model_ch, out_ch=16, noise_res=args.model_res,
        tensor_config=(
            ['xy', 'z', 'yz', 'x', 'zx', 'y']
        )
    ),
    subreduce=1 if args.load_image else 2,
    reduce='cat',
    separate_density_and_color=False,
    sh_coef_only=False,
    sdf_mode=False,
    max_steps=1024 if not args.load_image else 320,
    n_images=args.n_views,
    image_h=pic_h,
    image_w=pic_w,
    has_time_dynamics=False,
    visualize_mesh=True
)
decoder_2 = dict(
    type='FreqFactorizedDecoder',
    preprocessor=dict(
        type='TensorialGenerator',
        in_ch=args.model_ch, out_ch=16, noise_res=args.model_res,
        tensor_config=['xyz', 'xyz']
    ),
    subreduce=1,
    reduce='cat',
    separate_density_and_color=False,
    sh_coef_only=False,
    sdf_mode=False,
    max_steps=1024 if not args.load_image else 640,
    n_images=args.n_views,
    image_h=pic_h,
    image_w=pic_w,
    has_time_dynamics=False,
    freq_bands=[None, 0.4],
    visualize_mesh=True
)
patch_reg_loss = build_module(dict(
    type='MaskedTVLoss',
    power=1.5,
    loss_weight=0.00
))
sys.path.append('.') torch.backends.cuda.matmul.allow_tf32 = True def kmeans_downsample(points, n_points_to_sample): kmeans = KMeans(n_points_to_sample).fit(points) return ((points - kmeans.cluster_centers_[..., None, :]) ** 2).sum(-1).argmin(-1).tolist() args = config_parser() pprint(args) model_scaling_factor = 16 device = args.device BLENDER_TO_OPENCV_MATRIX = numpy.array([ [1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1] ], dtype=numpy.float32) code_size = (3, args.model_ch, args.model_res, args.model_res) rst.seed(args.seed) poses = [] intrinsics = [] if args.load_image: image = numpy.array(Image.open(args.load_image)).astype(numpy.float32) / 255.0 image = torch.tensor(image).cuda() images = einops.rearrange(image, '(ph h) (pw w) c -> (ph pw) h w c', ph=3, pw=2)[None] meta = json.load(open(os.path.join(os.path.dirname(__file__), "meta.json"))) poses = numpy.array([ (numpy.array(frame['transform_matrix']) @ BLENDER_TO_OPENCV_MATRIX) * 2 for frame in meta['sample_0']['view_frames'] ]) _, b, h, w, c = images.shape x, y = w / 2, h / 2 focal_length = y / numpy.tan(meta['fovy'] / 2) intrinsics = numpy.array([[focal_length, focal_length, x, y]] * args.n_views) work_dir = "results/%s" % args.proj_name os.makedirs(work_dir, exist_ok=True) os.chdir(work_dir) if not args.load_image: if args.dataset == "nerf_syn": model_scale = dict(chair=2.1, drums=2.3, ficus=2.3, hotdog=3.0, lego=2.4, materials=2.4, mic=2.5, ship=2.75) world_scale = 2 / model_scale[args.obj] dataset = NerfSynthetic([f"{args.data_dir}/{args.obj}/transforms_train.json"], rgba=True, world_scale=world_scale) val = NerfSynthetic([f"{args.data_dir}/{args.obj}/transforms_val.json"], world_scale=world_scale) test = NerfSynthetic([f"{args.data_dir}/{args.obj}/transforms_test.json"], world_scale=world_scale) entry = dataset[0] selected_idxs = kmeans_downsample(entry['cond_poses'][..., :3, 3], args.n_views) elif args.dataset == "oi": world_scale = 5.0 dataset = OppoDataset(f"{args.data_dir}/{args.obj}/output", split='train', world_scale=world_scale, rgba=True) val = OppoDataset(f"{args.data_dir}/{args.obj}/output", split='test', world_scale=world_scale) test = OppoDataset(f"{args.data_dir}/{args.obj}/output", split='test', world_scale=world_scale) entry = dataset[0] if args.n_views == 6: selected_idxs = [10, 3, 19, 22, 17, 35] elif args.n_views == 4: selected_idxs = [10, 33, 35, 6] else: selected_idxs = kmeans_downsample(entry['cond_poses'][..., :3, 3], args.n_views) data_entry = dict( cond_imgs=torch.tensor(entry['cond_imgs'][selected_idxs][None]).float().to(device), cond_poses=torch.tensor(entry['cond_poses'])[selected_idxs][None].float().to(device), cond_intrinsics=torch.tensor(entry['cond_intrinsics'])[selected_idxs][None].float().to(device), scene_id=[0], scene_name=[args.proj_name] ) entry = val[0] val_entry = dict( test_imgs=torch.tensor(entry['cond_imgs'][:args.n_val][None]).float().to(device), test_poses=torch.tensor(entry['cond_poses'][:args.n_val])[None].float().to(device), test_intrinsics=torch.tensor(entry['cond_intrinsics'][:args.n_val])[None].float().to(device), scene_id=[0], scene_name=[args.proj_name] ) entry = test[0] test_entry = dict( test_imgs=torch.tensor(entry['cond_imgs'][:][None]).float().to(device), test_poses=torch.tensor(entry['cond_poses'][:])[None].float().to(device), test_intrinsics=torch.tensor(entry['cond_intrinsics'][:])[None].float().to(device), scene_id=[0], scene_name=[args.proj_name] ) else: data_entry = dict( cond_imgs=images, cond_poses=torch.tensor(poses)[None].float().to(device) * 0.9, 
cond_intrinsics=torch.tensor(intrinsics)[None].float().to(device), scene_id=[0], scene_name=[args.proj_name] ) selected_idxs = list(range(args.n_views)) pic_h = data_entry['cond_imgs'].shape[-3] pic_w = data_entry['cond_imgs'].shape[-2] if args.load_image: args.model_res = 4 pic_h = pic_w = 320 cam = OrbitCamera('render', pic_w, pic_h, 3.2, 48) decoder_1 = dict( type='TensorialDecoder', preprocessor=dict( type='TensorialGenerator', in_ch=args.model_ch, out_ch=16, noise_res=args.model_res, tensor_config=( ['xy', 'z', 'yz', 'x', 'zx', 'y'] ) ), subreduce=1 if args.load_image else 2, reduce='cat', separate_density_and_color=False, sh_coef_only=False, sdf_mode=False, max_steps=1024 if not args.load_image else 320, n_images=args.n_views, image_h=pic_h, image_w=pic_w, has_time_dynamics=False, visualize_mesh=True ) decoder_2 = dict( type='FreqFactorizedDecoder', preprocessor=dict( type='TensorialGenerator', in_ch=args.model_ch, out_ch=16, noise_res=args.model_res, tensor_config=['xyz', 'xyz'] ), subreduce=1, reduce='cat', separate_density_and_color=False, sh_coef_only=False, sdf_mode=False, max_steps=1024 if not args.load_image else 640, n_images=args.n_views, image_h=pic_h, image_w=pic_w, has_time_dynamics=False, freq_bands=[None, 0.4], visualize_mesh=True ) patch_reg_loss = build_module(dict( type='MaskedTVLoss', power=1.5, loss_weight=0.00 ))
nerf: MultiSceneNeRF = build_model(dict(
0
2023-12-14 03:29:28+00:00
12k
u2seg/U2Seg
detectron2/data/datasets/builtin.py
[ { "identifier": "DatasetCatalog", "path": "detectron2/data/catalog.py", "snippet": "class _DatasetCatalog(UserDict):\nclass Metadata(types.SimpleNamespace):\nclass _MetadataCatalog(UserDict):\n def register(self, name, func):\n def get(self, name):\n def list(self) -> List[str]:\n def remove(self, name):\n def __str__(self):\n def __getattr__(self, key):\n def __setattr__(self, key, val):\n def as_dict(self):\n def set(self, **kwargs):\n def get(self, key, default=None):\n def get(self, name):\n def list(self):\n def remove(self, name):\n def __str__(self):\n _RENAMED = {\n \"class_names\": \"thing_classes\",\n \"dataset_id_to_contiguous_id\": \"thing_dataset_id_to_contiguous_id\",\n \"stuff_class_names\": \"stuff_classes\",\n }" }, { "identifier": "ADE20K_SEM_SEG_CATEGORIES", "path": "detectron2/data/datasets/builtin_meta.py", "snippet": "ADE20K_SEM_SEG_CATEGORIES = [\n \"wall\", \"building\", \"sky\", \"floor\", \"tree\", \"ceiling\", \"road, route\", \"bed\", \"window \", \"grass\", \"cabinet\", \"sidewalk, pavement\", \"person\", \"earth, ground\", \"door\", \"table\", \"mountain, mount\", \"plant\", \"curtain\", \"chair\", \"car\", \"water\", \"painting, picture\", \"sofa\", \"shelf\", \"house\", \"sea\", \"mirror\", \"rug\", \"field\", \"armchair\", \"seat\", \"fence\", \"desk\", \"rock, stone\", \"wardrobe, closet, press\", \"lamp\", \"tub\", \"rail\", \"cushion\", \"base, pedestal, stand\", \"box\", \"column, pillar\", \"signboard, sign\", \"chest of drawers, chest, bureau, dresser\", \"counter\", \"sand\", \"sink\", \"skyscraper\", \"fireplace\", \"refrigerator, icebox\", \"grandstand, covered stand\", \"path\", \"stairs\", \"runway\", \"case, display case, showcase, vitrine\", \"pool table, billiard table, snooker table\", \"pillow\", \"screen door, screen\", \"stairway, staircase\", \"river\", \"bridge, span\", \"bookcase\", \"blind, screen\", \"coffee table\", \"toilet, can, commode, crapper, pot, potty, stool, throne\", \"flower\", \"book\", \"hill\", \"bench\", \"countertop\", \"stove\", \"palm, palm tree\", \"kitchen island\", \"computer\", \"swivel chair\", \"boat\", \"bar\", \"arcade machine\", \"hovel, hut, hutch, shack, shanty\", \"bus\", \"towel\", \"light\", \"truck\", \"tower\", \"chandelier\", \"awning, sunshade, sunblind\", \"street lamp\", \"booth\", \"tv\", \"plane\", \"dirt track\", \"clothes\", \"pole\", \"land, ground, soil\", \"bannister, banister, balustrade, balusters, handrail\", \"escalator, moving staircase, moving stairway\", \"ottoman, pouf, pouffe, puff, hassock\", \"bottle\", \"buffet, counter, sideboard\", \"poster, posting, placard, notice, bill, card\", \"stage\", \"van\", \"ship\", \"fountain\", \"conveyer belt, conveyor belt, conveyer, conveyor, transporter\", \"canopy\", \"washer, automatic washer, washing machine\", \"plaything, toy\", \"pool\", \"stool\", \"barrel, cask\", \"basket, handbasket\", \"falls\", \"tent\", \"bag\", \"minibike, motorbike\", \"cradle\", \"oven\", \"ball\", \"food, solid food\", \"step, stair\", \"tank, storage tank\", \"trade name\", \"microwave\", \"pot\", \"animal\", \"bicycle\", \"lake\", \"dishwasher\", \"screen\", \"blanket, cover\", \"sculpture\", \"hood, exhaust hood\", \"sconce\", \"vase\", \"traffic light\", \"tray\", \"trash can\", \"fan\", \"pier\", \"crt screen\", \"plate\", \"monitor\", \"bulletin board\", \"shower\", \"radiator\", \"glass, drinking glass\", \"clock\", \"flag\", # noqa\n]" }, { "identifier": "_get_builtin_metadata", "path": "detectron2/data/datasets/builtin_meta.py", "snippet": "def 
_get_builtin_metadata(dataset_name):\n if dataset_name in [\"coco\", \"coco_semi\"]:\n return _get_coco_instances_meta()\n if dataset_name == \"coco_panoptic_separated\":\n return _get_coco_panoptic_separated_meta()\n elif dataset_name == \"coco_panoptic_standard\":\n meta = {}\n # The following metadata maps contiguous id from [0, #thing categories +\n # #stuff categories) to their names and colors. We have to replica of the\n # same name and color under \"thing_*\" and \"stuff_*\" because the current\n # visualization function in D2 handles thing and class classes differently\n # due to some heuristic used in Panoptic FPN. We keep the same naming to\n # enable reusing existing visualization functions.\n thing_classes = [k[\"name\"] for k in COCO_CATEGORIES]\n thing_colors = [k[\"color\"] for k in COCO_CATEGORIES]\n stuff_classes = [k[\"name\"] for k in COCO_CATEGORIES]\n stuff_colors = [k[\"color\"] for k in COCO_CATEGORIES]\n\n meta[\"thing_classes\"] = thing_classes\n meta[\"thing_colors\"] = thing_colors\n meta[\"stuff_classes\"] = stuff_classes\n meta[\"stuff_colors\"] = stuff_colors\n\n # Convert category id for training:\n # category id: like semantic segmentation, it is the class id for each\n # pixel. Since there are some classes not used in evaluation, the category\n # id is not always contiguous and thus we have two set of category ids:\n # - original category id: category id in the original dataset, mainly\n # used for evaluation.\n # - contiguous category id: [0, #classes), in order to train the linear\n # softmax classifier.\n thing_dataset_id_to_contiguous_id = {}\n stuff_dataset_id_to_contiguous_id = {}\n\n for i, cat in enumerate(COCO_CATEGORIES):\n if cat[\"isthing\"]:\n thing_dataset_id_to_contiguous_id[cat[\"id\"]] = i\n else:\n stuff_dataset_id_to_contiguous_id[cat[\"id\"]] = i\n\n meta[\"thing_dataset_id_to_contiguous_id\"] = thing_dataset_id_to_contiguous_id\n meta[\"stuff_dataset_id_to_contiguous_id\"] = stuff_dataset_id_to_contiguous_id\n\n return meta\n elif dataset_name == \"coco_person\":\n return {\n \"thing_classes\": [\"person\"],\n \"keypoint_names\": COCO_PERSON_KEYPOINT_NAMES,\n \"keypoint_flip_map\": COCO_PERSON_KEYPOINT_FLIP_MAP,\n \"keypoint_connection_rules\": KEYPOINT_CONNECTION_RULES,\n }\n elif dataset_name == \"cityscapes\":\n # fmt: off\n CITYSCAPES_THING_CLASSES = [\n \"person\", \"rider\", \"car\", \"truck\",\n \"bus\", \"train\", \"motorcycle\", \"bicycle\",\n ]\n CITYSCAPES_STUFF_CLASSES = [\n \"road\", \"sidewalk\", \"building\", \"wall\", \"fence\", \"pole\", \"traffic light\",\n \"traffic sign\", \"vegetation\", \"terrain\", \"sky\", \"person\", \"rider\", \"car\",\n \"truck\", \"bus\", \"train\", \"motorcycle\", \"bicycle\",\n ]\n # fmt: on\n return {\n \"thing_classes\": CITYSCAPES_THING_CLASSES,\n \"stuff_classes\": CITYSCAPES_STUFF_CLASSES,\n }\n raise KeyError(\"No built-in metadata for dataset {}\".format(dataset_name))" }, { "identifier": "load_cityscapes_instances", "path": "detectron2/data/datasets/cityscapes.py", "snippet": "def load_cityscapes_instances(image_dir, gt_dir, from_json=True, to_polygons=True):\n \"\"\"\n Args:\n image_dir (str): path to the raw dataset. e.g., \"~/cityscapes/leftImg8bit/train\".\n gt_dir (str): path to the raw annotations. 
e.g., \"~/cityscapes/gtFine/train\".\n from_json (bool): whether to read annotations from the raw json file or the png files.\n to_polygons (bool): whether to represent the segmentation as polygons\n (COCO's format) instead of masks (cityscapes's format).\n\n Returns:\n list[dict]: a list of dicts in Detectron2 standard format. (See\n `Using Custom Datasets </tutorials/datasets.html>`_ )\n \"\"\"\n if from_json:\n assert to_polygons, (\n \"Cityscapes's json annotations are in polygon format. \"\n \"Converting to mask format is not supported now.\"\n )\n files = _get_cityscapes_files(image_dir, gt_dir)\n\n logger.info(\"Preprocessing cityscapes annotations ...\")\n # This is still not fast: all workers will execute duplicate works and will\n # take up to 10m on a 8GPU server.\n pool = mp.Pool(processes=max(mp.cpu_count() // get_world_size() // 2, 4))\n\n ret = pool.map(\n functools.partial(_cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons),\n files,\n )\n logger.info(\"Loaded {} images from {}\".format(len(ret), image_dir))\n\n # Map cityscape ids to contiguous ids\n from cityscapesscripts.helpers.labels import labels\n\n labels = [l for l in labels if l.hasInstances and not l.ignoreInEval]\n dataset_id_to_contiguous_id = {l.id: idx for idx, l in enumerate(labels)}\n for dict_per_image in ret:\n for anno in dict_per_image[\"annotations\"]:\n anno[\"category_id\"] = dataset_id_to_contiguous_id[anno[\"category_id\"]]\n return ret" }, { "identifier": "load_cityscapes_semantic", "path": "detectron2/data/datasets/cityscapes.py", "snippet": "def load_cityscapes_semantic(image_dir, gt_dir):\n \"\"\"\n Args:\n image_dir (str): path to the raw dataset. e.g., \"~/cityscapes/leftImg8bit/train\".\n gt_dir (str): path to the raw annotations. e.g., \"~/cityscapes/gtFine/train\".\n\n Returns:\n list[dict]: a list of dict, each has \"file_name\" and\n \"sem_seg_file_name\".\n \"\"\"\n ret = []\n # gt_dir is small and contain many small files. make sense to fetch to local first\n gt_dir = PathManager.get_local_path(gt_dir)\n for image_file, _, label_file, json_file in _get_cityscapes_files(image_dir, gt_dir):\n label_file = label_file.replace(\"labelIds\", \"labelTrainIds\")\n\n with PathManager.open(json_file, \"r\") as f:\n jsonobj = json.load(f)\n ret.append(\n {\n \"file_name\": image_file,\n \"sem_seg_file_name\": label_file,\n \"height\": jsonobj[\"imgHeight\"],\n \"width\": jsonobj[\"imgWidth\"],\n }\n )\n assert len(ret), f\"No images found in {image_dir}!\"\n assert PathManager.isfile(\n ret[0][\"sem_seg_file_name\"]\n ), \"Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py\" # noqa\n return ret" }, { "identifier": "register_all_cityscapes_panoptic", "path": "detectron2/data/datasets/cityscapes_panoptic.py", "snippet": "def register_all_cityscapes_panoptic(root):\n meta = {}\n # The following metadata maps contiguous id from [0, #thing categories +\n # #stuff categories) to their names and colors. We have to replica of the\n # same name and color under \"thing_*\" and \"stuff_*\" because the current\n # visualization function in D2 handles thing and class classes differently\n # due to some heuristic used in Panoptic FPN. 
We keep the same naming to\n # enable reusing existing visualization functions.\n thing_classes = [k[\"name\"] for k in CITYSCAPES_CATEGORIES]\n thing_colors = [k[\"color\"] for k in CITYSCAPES_CATEGORIES]\n stuff_classes = [k[\"name\"] for k in CITYSCAPES_CATEGORIES]\n stuff_colors = [k[\"color\"] for k in CITYSCAPES_CATEGORIES]\n\n meta[\"thing_classes\"] = thing_classes\n meta[\"thing_colors\"] = thing_colors\n meta[\"stuff_classes\"] = stuff_classes\n meta[\"stuff_colors\"] = stuff_colors\n\n # There are three types of ids in cityscapes panoptic segmentation:\n # (1) category id: like semantic segmentation, it is the class id for each\n # pixel. Since there are some classes not used in evaluation, the category\n # id is not always contiguous and thus we have two set of category ids:\n # - original category id: category id in the original dataset, mainly\n # used for evaluation.\n # - contiguous category id: [0, #classes), in order to train the classifier\n # (2) instance id: this id is used to differentiate different instances from\n # the same category. For \"stuff\" classes, the instance id is always 0; for\n # \"thing\" classes, the instance id starts from 1 and 0 is reserved for\n # ignored instances (e.g. crowd annotation).\n # (3) panoptic id: this is the compact id that encode both category and\n # instance id by: category_id * 1000 + instance_id.\n thing_dataset_id_to_contiguous_id = {}\n stuff_dataset_id_to_contiguous_id = {}\n\n for k in CITYSCAPES_CATEGORIES:\n if k[\"isthing\"] == 1:\n thing_dataset_id_to_contiguous_id[k[\"id\"]] = k[\"trainId\"]\n else:\n stuff_dataset_id_to_contiguous_id[k[\"id\"]] = k[\"trainId\"]\n\n meta[\"thing_dataset_id_to_contiguous_id\"] = thing_dataset_id_to_contiguous_id\n meta[\"stuff_dataset_id_to_contiguous_id\"] = stuff_dataset_id_to_contiguous_id\n\n for key, (image_dir, gt_dir, gt_json) in _RAW_CITYSCAPES_PANOPTIC_SPLITS.items():\n image_dir = os.path.join(root, image_dir)\n gt_dir = os.path.join(root, gt_dir)\n gt_json = os.path.join(root, gt_json)\n\n DatasetCatalog.register(\n key, lambda x=image_dir, y=gt_dir, z=gt_json: load_cityscapes_panoptic(x, y, z, meta)\n )\n MetadataCatalog.get(key).set(\n panoptic_root=gt_dir,\n image_root=image_dir,\n panoptic_json=gt_json,\n gt_dir=gt_dir.replace(\"cityscapes_panoptic_\", \"\"),\n evaluator_type=\"cityscapes_panoptic_seg\",\n ignore_label=255,\n label_divisor=1000,\n **meta,\n )" }, { "identifier": "load_sem_seg", "path": "detectron2/data/datasets/coco.py", "snippet": "def load_sem_seg(gt_root, image_root, gt_ext=\"png\", image_ext=\"jpg\"):\n \"\"\"\n Load semantic segmentation datasets. All files under \"gt_root\" with \"gt_ext\" extension are\n treated as ground truth annotations and all files under \"image_root\" with \"image_ext\" extension\n as input images. Ground truth and input images are matched using file paths relative to\n \"gt_root\" and \"image_root\" respectively without taking into account file extensions.\n This works for COCO as well as some other datasets.\n\n Args:\n gt_root (str): full path to ground truth semantic segmentation files. Semantic segmentation\n annotations are stored as images with integer values in pixels that represent\n corresponding semantic labels.\n image_root (str): the directory where the input images are.\n gt_ext (str): file extension for ground truth annotations.\n image_ext (str): file extension for input images.\n\n Returns:\n list[dict]:\n a list of dicts in detectron2 standard format without instance-level\n annotation.\n\n Notes:\n 1. 
This function does not read the image and ground truth files.\n The results do not have the \"image\" and \"sem_seg\" fields.\n \"\"\"\n\n # We match input images with ground truth based on their relative filepaths (without file\n # extensions) starting from 'image_root' and 'gt_root' respectively.\n def file2id(folder_path, file_path):\n # extract relative path starting from `folder_path`\n image_id = os.path.normpath(os.path.relpath(file_path, start=folder_path))\n # remove file extension\n image_id = os.path.splitext(image_id)[0]\n return image_id\n\n input_files = sorted(\n (os.path.join(image_root, f) for f in PathManager.ls(image_root) if f.endswith(image_ext)),\n key=lambda file_path: file2id(image_root, file_path),\n )\n gt_files = sorted(\n (os.path.join(gt_root, f) for f in PathManager.ls(gt_root) if f.endswith(gt_ext)),\n key=lambda file_path: file2id(gt_root, file_path),\n )\n\n assert len(gt_files) > 0, \"No annotations found in {}.\".format(gt_root)\n\n # Use the intersection, so that val2017_100 annotations can run smoothly with val2017 images\n if len(input_files) != len(gt_files):\n logger.warn(\n \"Directory {} and {} has {} and {} files, respectively.\".format(\n image_root, gt_root, len(input_files), len(gt_files)\n )\n )\n input_basenames = [os.path.basename(f)[: -len(image_ext)] for f in input_files]\n gt_basenames = [os.path.basename(f)[: -len(gt_ext)] for f in gt_files]\n intersect = list(set(input_basenames) & set(gt_basenames))\n # sort, otherwise each worker may obtain a list[dict] in different order\n intersect = sorted(intersect)\n logger.warn(\"Will use their intersection of {} files.\".format(len(intersect)))\n input_files = [os.path.join(image_root, f + image_ext) for f in intersect]\n gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect]\n\n logger.info(\n \"Loaded {} images with semantic segmentation from {}\".format(len(input_files), image_root)\n )\n\n dataset_dicts = []\n for (img_path, gt_path) in zip(input_files, gt_files):\n record = {}\n record[\"file_name\"] = img_path\n record[\"sem_seg_file_name\"] = gt_path\n dataset_dicts.append(record)\n\n return dataset_dicts" }, { "identifier": "register_coco_instances", "path": "detectron2/data/datasets/coco.py", "snippet": "def register_coco_instances(name, metadata, json_file, image_root):\n \"\"\"\n Register a dataset in COCO's json annotation format for\n instance detection, instance segmentation and keypoint detection.\n (i.e., Type 1 and 2 in http://cocodataset.org/#format-data.\n `instances*.json` and `person_keypoints*.json` in the dataset).\n\n This is an example of how to register a new dataset.\n You can do something similar to this function, to register new datasets.\n\n Args:\n name (str): the name that identifies a dataset, e.g. \"coco_2014_train\".\n metadata (dict): extra metadata associated with this dataset. You can\n leave it as an empty dict.\n json_file (str): path to the json instance annotation file.\n image_root (str or path-like): directory which contains all the images.\n \"\"\"\n assert isinstance(name, str), name\n assert isinstance(json_file, (str, os.PathLike)), json_file\n assert isinstance(image_root, (str, os.PathLike)), image_root\n # 1. register a function which returns dicts\n DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name))\n\n # 2. 
Optionally, add metadata about this dataset,\n # since they might be useful in evaluation, visualization or logging\n MetadataCatalog.get(name).set(\n json_file=json_file, image_root=image_root, evaluator_type=\"coco\", **metadata\n )" }, { "identifier": "register_coco_panoptic", "path": "detectron2/data/datasets/coco_panoptic.py", "snippet": "def register_coco_panoptic(\n name, metadata, image_root, panoptic_root, panoptic_json, instances_json=None\n):\n \"\"\"\n Register a \"standard\" version of COCO panoptic segmentation dataset named `name`.\n The dictionaries in this registered dataset follows detectron2's standard format.\n Hence it's called \"standard\".\n\n Args:\n name (str): the name that identifies a dataset,\n e.g. \"coco_2017_train_panoptic\"\n metadata (dict): extra metadata associated with this dataset.\n image_root (str): directory which contains all the images\n panoptic_root (str): directory which contains panoptic annotation images in COCO format\n panoptic_json (str): path to the json panoptic annotation file in COCO format\n sem_seg_root (none): not used, to be consistent with\n `register_coco_panoptic_separated`.\n instances_json (str): path to the json instance annotation file\n \"\"\"\n panoptic_name = name\n DatasetCatalog.register(\n panoptic_name,\n lambda: load_coco_panoptic_json(panoptic_json, image_root, panoptic_root, metadata),\n )\n MetadataCatalog.get(panoptic_name).set(\n panoptic_root=panoptic_root,\n image_root=image_root,\n panoptic_json=panoptic_json,\n json_file=instances_json,\n evaluator_type=\"coco_panoptic_seg\",\n ignore_label=255,\n label_divisor=1000,\n **metadata,\n )" }, { "identifier": "register_coco_panoptic_separated", "path": "detectron2/data/datasets/coco_panoptic.py", "snippet": "def register_coco_panoptic_separated(\n name, metadata, image_root, panoptic_root, panoptic_json, sem_seg_root, instances_json\n):\n \"\"\"\n Register a \"separated\" version of COCO panoptic segmentation dataset named `name`.\n The annotations in this registered dataset will contain both instance annotations and\n semantic annotations, each with its own contiguous ids. Hence it's called \"separated\".\n\n It follows the setting used by the PanopticFPN paper:\n\n 1. The instance annotations directly come from polygons in the COCO\n instances annotation task, rather than from the masks in the COCO panoptic annotations.\n\n The two format have small differences:\n Polygons in the instance annotations may have overlaps.\n The mask annotations are produced by labeling the overlapped polygons\n with depth ordering.\n\n 2. The semantic annotations are converted from panoptic annotations, where\n all \"things\" are assigned a semantic id of 0.\n All semantic categories will therefore have ids in contiguous\n range [1, #stuff_categories].\n\n This function will also register a pure semantic segmentation dataset\n named ``name + '_stuffonly'``.\n\n Args:\n name (str): the name that identifies a dataset,\n e.g. 
\"coco_2017_train_panoptic\"\n metadata (dict): extra metadata associated with this dataset.\n image_root (str): directory which contains all the images\n panoptic_root (str): directory which contains panoptic annotation images\n panoptic_json (str): path to the json panoptic annotation file\n sem_seg_root (str): directory which contains all the ground truth segmentation annotations.\n instances_json (str): path to the json instance annotation file\n \"\"\"\n panoptic_name = name + \"_separated\"\n DatasetCatalog.register(\n panoptic_name,\n lambda: merge_to_panoptic(\n load_coco_json(instances_json, image_root, panoptic_name),\n load_sem_seg(sem_seg_root, image_root),\n ),\n )\n MetadataCatalog.get(panoptic_name).set(\n panoptic_root=panoptic_root,\n image_root=image_root,\n panoptic_json=panoptic_json,\n sem_seg_root=sem_seg_root,\n json_file=instances_json, # TODO rename\n evaluator_type=\"coco_panoptic_seg\",\n ignore_label=255,\n **metadata,\n )\n\n semantic_name = name + \"_stuffonly\"\n DatasetCatalog.register(semantic_name, lambda: load_sem_seg(sem_seg_root, image_root))\n MetadataCatalog.get(semantic_name).set(\n sem_seg_root=sem_seg_root,\n image_root=image_root,\n evaluator_type=\"sem_seg\",\n ignore_label=255,\n **metadata,\n )" }, { "identifier": "get_lvis_instances_meta", "path": "detectron2/data/datasets/lvis.py", "snippet": "def get_lvis_instances_meta(dataset_name):\n \"\"\"\n Load LVIS metadata.\n\n Args:\n dataset_name (str): LVIS dataset name without the split name (e.g., \"lvis_v0.5\").\n\n Returns:\n dict: LVIS metadata with keys: thing_classes\n \"\"\"\n if \"cocofied\" in dataset_name:\n return _get_coco_instances_meta()\n if \"v0.5\" in dataset_name:\n return _get_lvis_instances_meta_v0_5()\n elif \"v1\" in dataset_name:\n return _get_lvis_instances_meta_v1()\n raise ValueError(\"No built-in metadata for dataset {}\".format(dataset_name))" }, { "identifier": "register_lvis_instances", "path": "detectron2/data/datasets/lvis.py", "snippet": "def register_lvis_instances(name, metadata, json_file, image_root):\n \"\"\"\n Register a dataset in LVIS's json annotation format for instance detection and segmentation.\n\n Args:\n name (str): a name that identifies the dataset, e.g. \"lvis_v0.5_train\".\n metadata (dict): extra metadata associated with this dataset. It can be an empty dict.\n json_file (str): path to the json instance annotation file.\n image_root (str or path-like): directory which contains all the images.\n \"\"\"\n DatasetCatalog.register(name, lambda: load_lvis_json(json_file, image_root, name))\n MetadataCatalog.get(name).set(\n json_file=json_file, image_root=image_root, evaluator_type=\"lvis\", **metadata\n )" }, { "identifier": "register_pascal_voc", "path": "detectron2/data/datasets/pascal_voc.py", "snippet": "def register_pascal_voc(name, dirname, split, year, class_names=CLASS_NAMES):\n DatasetCatalog.register(name, lambda: load_voc_instances(dirname, split, class_names))\n MetadataCatalog.get(name).set(\n thing_classes=list(class_names), dirname=dirname, year=year, split=split\n )" } ]
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from .builtin_meta import ADE20K_SEM_SEG_CATEGORIES, _get_builtin_metadata
from .cityscapes import load_cityscapes_instances, load_cityscapes_semantic
from .cityscapes_panoptic import register_all_cityscapes_panoptic
from .coco import load_sem_seg, register_coco_instances
from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated
from .lvis import get_lvis_instances_meta, register_lvis_instances
from .pascal_voc import register_pascal_voc
9,656
instances_json, ) # ==== Predefined datasets and splits for LVIS ========== _PREDEFINED_SPLITS_LVIS = { "lvis_v1": { "lvis_v1_train": ("coco/", "lvis/lvis_v1_train.json"), "lvis_v1_val": ("coco/", "lvis/lvis_v1_val.json"), "lvis_v1_test_dev": ("coco/", "lvis/lvis_v1_image_info_test_dev.json"), "lvis_v1_test_challenge": ("coco/", "lvis/lvis_v1_image_info_test_challenge.json"), }, "lvis_v0.5": { "lvis_v0.5_train": ("coco/", "lvis/lvis_v0.5_train.json"), "lvis_v0.5_val": ("coco/", "lvis/lvis_v0.5_val.json"), "lvis_v0.5_val_rand_100": ("coco/", "lvis/lvis_v0.5_val_rand_100.json"), "lvis_v0.5_test": ("coco/", "lvis/lvis_v0.5_image_info_test.json"), }, "lvis_v0.5_cocofied": { "lvis_v0.5_train_cocofied": ("coco/", "lvis/lvis_v0.5_train_cocofied.json"), "lvis_v0.5_val_cocofied": ("coco/", "lvis/lvis_v0.5_val_cocofied.json"), }, } def register_all_lvis(root): for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_LVIS.items(): for key, (image_root, json_file) in splits_per_dataset.items(): register_lvis_instances( key, get_lvis_instances_meta(dataset_name), os.path.join(root, json_file) if "://" not in json_file else json_file, os.path.join(root, image_root), ) # ==== Predefined splits for raw cityscapes images =========== _RAW_CITYSCAPES_SPLITS = { "cityscapes_fine_{task}_train": ("cityscapes/leftImg8bit/train/", "cityscapes/gtFine/train/"), "cityscapes_fine_{task}_val": ("cityscapes/leftImg8bit/val/", "cityscapes/gtFine/val/"), "cityscapes_fine_{task}_test": ("cityscapes/leftImg8bit/test/", "cityscapes/gtFine/test/"), } def register_all_cityscapes(root): for key, (image_dir, gt_dir) in _RAW_CITYSCAPES_SPLITS.items(): meta = _get_builtin_metadata("cityscapes") image_dir = os.path.join(root, image_dir) gt_dir = os.path.join(root, gt_dir) inst_key = key.format(task="instance_seg") DatasetCatalog.register( inst_key, lambda x=image_dir, y=gt_dir: load_cityscapes_instances( x, y, from_json=True, to_polygons=True ), ) MetadataCatalog.get(inst_key).set( image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes_instance", **meta ) sem_key = key.format(task="sem_seg") DatasetCatalog.register( sem_key, lambda x=image_dir, y=gt_dir: load_cityscapes_semantic(x, y) ) MetadataCatalog.get(sem_key).set( image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes_sem_seg", ignore_label=255, **meta, ) # ==== Predefined splits for PASCAL VOC =========== def register_all_pascal_voc(root): SPLITS = [ ("voc_2007_trainval", "VOC2007", "trainval"), ("voc_2007_train", "VOC2007", "train"), ("voc_2007_val", "VOC2007", "val"), ("voc_2007_test", "VOC2007", "test"), ("voc_2012_trainval", "VOC2012", "trainval"), ("voc_2012_train", "VOC2012", "train"), ("voc_2012_val", "VOC2012", "val"), ] for name, dirname, split in SPLITS: year = 2007 if "2007" in name else 2012 register_pascal_voc(name, os.path.join(root, dirname), split, year) MetadataCatalog.get(name).evaluator_type = "pascal_voc" def register_all_ade20k(root): root = os.path.join(root, "ADEChallengeData2016") for name, dirname in [("train", "training"), ("val", "validation")]: image_dir = os.path.join(root, "images", dirname) gt_dir = os.path.join(root, "annotations_detectron2", dirname) name = f"ade20k_sem_seg_{name}" DatasetCatalog.register( name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="png", image_ext="jpg") ) MetadataCatalog.get(name).set( stuff_classes=ADE20K_SEM_SEG_CATEGORIES[:], image_root=image_dir, sem_seg_root=gt_dir, evaluator_type="sem_seg", ignore_label=255, ) # True for open source; # Internally at fb, we register them elsewhere 
if __name__.endswith(".builtin"): # Assume pre-defined datasets live in `./datasets`. _root = "datasets" # _root = os.path.expanduser(os.getenv("DETECTRON2_DATASETS", "datasets")) register_all_coco(_root) register_all_lvis(_root) register_all_cityscapes(_root)
# -*- coding: utf-8 -*- # Copyright (c) Facebook, Inc. and its affiliates. """ This file registers pre-defined datasets at hard-coded paths, and their metadata. We hard-code metadata for common datasets. This will enable: 1. Consistency check when loading the datasets 2. Use models on these standard datasets directly and run demos, without having to download the dataset annotations We hard-code some paths to the dataset that's assumed to exist in "./datasets/". Users SHOULD NOT use this file to create new dataset / metadata for new dataset. To add new dataset, refer to the tutorial "docs/DATASETS.md". """ # ==== Predefined datasets and splits for COCO ========== cluster_num = os.getenv('CLUSTER_NUM', '800') _PREDEFINED_SPLITS_COCO_SEMI = {} _PREDEFINED_SPLITS_COCO_SEMI["coco_semi"] = { # we use seed 42 to be consistent with previous works on SSL detection and segmentation "coco_semi_1perc": ("/shared/group/coco/train2017", "/shared/niudt/DATASET/coco/annotations/coco-semi/1perc_instances_train2017.json"), "coco_semi_2perc": ("/shared/group/coco/train2017", "/shared/niudt/DATASET/coco/annotations/coco-semi/2perc_instances_train2017.json"), "coco_semi_5perc": ("/shared/group/coco/train2017", "/shared/niudt/DATASET/coco/annotations/coco-semi/5perc_instances_train2017.json"), "coco_semi_10perc": ("/shared/group/coco/train2017", "/shared/niudt/DATASET/coco/annotations/coco-semi/10perc_instances_train2017.json"), "coco_semi_20perc": ("/shared/group/coco/train2017", "/shared/niudt/DATASET/coco/annotations/coco-semi/20perc_instances_train2017.json"), "coco_semi_30perc": ("/shared/group/coco/train2017", "/shared/niudt/DATASET/coco/annotations/coco-semi/30perc_instances_train2017.json"), "coco_semi_40perc": ("/shared/group/coco/train2017", "/shared/niudt/DATASET/coco/annotations/coco-semi/40perc_instances_train2017.json"), "coco_semi_50perc": ("/shared/group/coco/train2017", "/shared/niudt/DATASET/coco/annotations/coco-semi/50perc_instances_train2017.json"), } def register_all_coco_semi(root): for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO_SEMI.items(): for key, (image_root, json_file) in splits_per_dataset.items(): # Assume pre-defined datasets live in `./datasets`. 
register_coco_instances( key, _get_builtin_metadata(dataset_name), os.path.join(root, json_file) if "://" not in json_file else json_file, os.path.join(root, image_root), ) _PREDEFINED_SPLITS_COCO = {} _PREDEFINED_SPLITS_COCO["coco"] = { "coco_2014_train": ("coco/train2014", "coco/annotations/instances_train2014.json"), "coco_2014_val": ("coco/val2014", "coco/annotations/instances_val2014.json"), "coco_2014_minival": ("coco/val2014", "coco/annotations/instances_minival2014.json"), "coco_2014_valminusminival": ( "coco/val2014", "coco/annotations/instances_valminusminival2014.json", ), "coco_2017_train": ("./coco/train2017", f"./prepare_ours/u2seg_annotations/ins_annotations/cocotrain_{cluster_num}.json"), "coco_2017_val": ("./coco/val2017", "./coco/annotations/instances_val2017.json"), "coco_2017_test": ("coco/test2017", "coco/annotations/image_info_test2017.json"), "coco_2017_test-dev": ("coco/test2017", "coco/annotations/image_info_test-dev2017.json"), "coco_2017_val_100": ("coco/val2017", "coco/annotations/instances_val2017_100.json"), } _PREDEFINED_SPLITS_COCO["coco_person"] = { "keypoints_coco_2014_train": ( "coco/train2014", "coco/annotations/person_keypoints_train2014.json", ), "keypoints_coco_2014_val": ("coco/val2014", "coco/annotations/person_keypoints_val2014.json"), "keypoints_coco_2014_minival": ( "coco/val2014", "coco/annotations/person_keypoints_minival2014.json", ), "keypoints_coco_2014_valminusminival": ( "coco/val2014", "coco/annotations/person_keypoints_valminusminival2014.json", ), "keypoints_coco_2017_train": ( "coco/train2017", "coco/annotations/person_keypoints_train2017.json", ), "keypoints_coco_2017_val": ("coco/val2017", "coco/annotations/person_keypoints_val2017.json"), "keypoints_coco_2017_val_100": ( "coco/val2017", "coco/annotations/person_keypoints_val2017_100.json", ), } _PREDEFINED_SPLITS_COCO_PANOPTIC = { "coco_2017_train_panoptic": ( # This is the original panoptic annotation directory f"./prepare_ours/u2seg_annotations/panoptic_annotations/cocotrain_{cluster_num}", # this should be .png format annotations f"./prepare_ours/u2seg_annotations/panoptic_annotations/cocotrain_{cluster_num}.json", #this should be .json file # This directory contains semantic annotations that are # converted from panoptic annotations. # It is used by PanopticFPN. # You can use the script at detectron2/datasets/prepare_panoptic_fpn.py # to create these directories. f"./prepare_ours/u2seg_annotations/panoptic_annotations/panoptic_stuff_cocotrain_{cluster_num}", ), "coco_2017_val_panoptic": ( "/home/niudt/u2seg_test/detectron2/datasets/datasets/coco/val2017", "/home/niudt/u2seg_test/detectron2/datasets/datasets/panoptic_anns/panoptic_val2017.json", "/home/niudt/u2seg_test/detectron2/datasets/datasets/panoptic_anns/panoptic_stuff_val2017", ), "coco_2017_val_100_panoptic": ( "coco/panoptic_val2017_100", "coco/annotations/panoptic_val2017_100.json", "coco/panoptic_stuff_val2017_100", ), } def register_all_coco(root): for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items(): for key, (image_root, json_file) in splits_per_dataset.items(): # Assume pre-defined datasets live in `./datasets`. 
register_coco_instances( key, _get_builtin_metadata(dataset_name), os.path.join(root, json_file) if "://" not in json_file else json_file, os.path.join(root, image_root), ) for ( prefix, (panoptic_root, panoptic_json, semantic_root), ) in _PREDEFINED_SPLITS_COCO_PANOPTIC.items(): prefix_instances = prefix[: -len("_panoptic")] instances_meta = MetadataCatalog.get(prefix_instances) image_root, instances_json = instances_meta.image_root, instances_meta.json_file # The "separated" version of COCO panoptic segmentation dataset, # e.g. used by Panoptic FPN # import pdb # pdb.set_trace() register_coco_panoptic_separated( prefix, _get_builtin_metadata("coco_panoptic_separated"), image_root, os.path.join(root, panoptic_root), os.path.join(root, panoptic_json), os.path.join(root, semantic_root), instances_json, ) # The "standard" version of COCO panoptic segmentation dataset, # e.g. used by Panoptic-DeepLab register_coco_panoptic( prefix, _get_builtin_metadata("coco_panoptic_standard"), image_root, os.path.join(root, panoptic_root), os.path.join(root, panoptic_json), instances_json, ) # ==== Predefined datasets and splits for LVIS ========== _PREDEFINED_SPLITS_LVIS = { "lvis_v1": { "lvis_v1_train": ("coco/", "lvis/lvis_v1_train.json"), "lvis_v1_val": ("coco/", "lvis/lvis_v1_val.json"), "lvis_v1_test_dev": ("coco/", "lvis/lvis_v1_image_info_test_dev.json"), "lvis_v1_test_challenge": ("coco/", "lvis/lvis_v1_image_info_test_challenge.json"), }, "lvis_v0.5": { "lvis_v0.5_train": ("coco/", "lvis/lvis_v0.5_train.json"), "lvis_v0.5_val": ("coco/", "lvis/lvis_v0.5_val.json"), "lvis_v0.5_val_rand_100": ("coco/", "lvis/lvis_v0.5_val_rand_100.json"), "lvis_v0.5_test": ("coco/", "lvis/lvis_v0.5_image_info_test.json"), }, "lvis_v0.5_cocofied": { "lvis_v0.5_train_cocofied": ("coco/", "lvis/lvis_v0.5_train_cocofied.json"), "lvis_v0.5_val_cocofied": ("coco/", "lvis/lvis_v0.5_val_cocofied.json"), }, } def register_all_lvis(root): for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_LVIS.items(): for key, (image_root, json_file) in splits_per_dataset.items(): register_lvis_instances( key, get_lvis_instances_meta(dataset_name), os.path.join(root, json_file) if "://" not in json_file else json_file, os.path.join(root, image_root), ) # ==== Predefined splits for raw cityscapes images =========== _RAW_CITYSCAPES_SPLITS = { "cityscapes_fine_{task}_train": ("cityscapes/leftImg8bit/train/", "cityscapes/gtFine/train/"), "cityscapes_fine_{task}_val": ("cityscapes/leftImg8bit/val/", "cityscapes/gtFine/val/"), "cityscapes_fine_{task}_test": ("cityscapes/leftImg8bit/test/", "cityscapes/gtFine/test/"), } def register_all_cityscapes(root): for key, (image_dir, gt_dir) in _RAW_CITYSCAPES_SPLITS.items(): meta = _get_builtin_metadata("cityscapes") image_dir = os.path.join(root, image_dir) gt_dir = os.path.join(root, gt_dir) inst_key = key.format(task="instance_seg") DatasetCatalog.register( inst_key, lambda x=image_dir, y=gt_dir: load_cityscapes_instances( x, y, from_json=True, to_polygons=True ), ) MetadataCatalog.get(inst_key).set( image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes_instance", **meta ) sem_key = key.format(task="sem_seg") DatasetCatalog.register( sem_key, lambda x=image_dir, y=gt_dir: load_cityscapes_semantic(x, y) ) MetadataCatalog.get(sem_key).set( image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes_sem_seg", ignore_label=255, **meta, ) # ==== Predefined splits for PASCAL VOC =========== def register_all_pascal_voc(root): SPLITS = [ ("voc_2007_trainval", "VOC2007", "trainval"), 
("voc_2007_train", "VOC2007", "train"), ("voc_2007_val", "VOC2007", "val"), ("voc_2007_test", "VOC2007", "test"), ("voc_2012_trainval", "VOC2012", "trainval"), ("voc_2012_train", "VOC2012", "train"), ("voc_2012_val", "VOC2012", "val"), ] for name, dirname, split in SPLITS: year = 2007 if "2007" in name else 2012 register_pascal_voc(name, os.path.join(root, dirname), split, year) MetadataCatalog.get(name).evaluator_type = "pascal_voc" def register_all_ade20k(root): root = os.path.join(root, "ADEChallengeData2016") for name, dirname in [("train", "training"), ("val", "validation")]: image_dir = os.path.join(root, "images", dirname) gt_dir = os.path.join(root, "annotations_detectron2", dirname) name = f"ade20k_sem_seg_{name}" DatasetCatalog.register( name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="png", image_ext="jpg") ) MetadataCatalog.get(name).set( stuff_classes=ADE20K_SEM_SEG_CATEGORIES[:], image_root=image_dir, sem_seg_root=gt_dir, evaluator_type="sem_seg", ignore_label=255, ) # True for open source; # Internally at fb, we register them elsewhere if __name__.endswith(".builtin"): # Assume pre-defined datasets live in `./datasets`. _root = "datasets" # _root = os.path.expanduser(os.getenv("DETECTRON2_DATASETS", "datasets")) register_all_coco(_root) register_all_lvis(_root) register_all_cityscapes(_root)
register_all_cityscapes_panoptic(_root)
5
2023-12-05 01:13:31+00:00
12k
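The builtin.py record above revolves around one registration pattern: DatasetCatalog.register binds a dataset name to a loader function, and MetadataCatalog.get(name).set attaches the metadata (evaluator type, class names, ignore label) that evaluation and visualization rely on. The sketch below is a minimal, hedged example of applying that same pattern to a user dataset; the dataset name, class lists, and directory layout under datasets/my_dataset/ are hypothetical placeholders and are not part of the source record.

import os

from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import load_sem_seg, register_coco_instances

# Hypothetical layout: datasets/my_dataset/{images, annotations.json, sem_seg_png}
_MY_ROOT = "datasets/my_dataset"

# Instance annotations in COCO json format: one call registers the split and
# sets evaluator_type="coco" plus any extra metadata passed in.
register_coco_instances(
    "my_dataset_train",
    {"thing_classes": ["widget", "gadget"]},  # placeholder class names
    os.path.join(_MY_ROOT, "annotations.json"),
    os.path.join(_MY_ROOT, "images"),
)

# Semantic segmentation from per-pixel png masks, mirroring register_all_ade20k.
DatasetCatalog.register(
    "my_dataset_sem_seg_train",
    lambda root=_MY_ROOT: load_sem_seg(
        os.path.join(root, "sem_seg_png"),
        os.path.join(root, "images"),
        gt_ext="png",
        image_ext="jpg",
    ),
)
MetadataCatalog.get("my_dataset_sem_seg_train").set(
    stuff_classes=["background", "widget"],  # placeholder class names
    image_root=os.path.join(_MY_ROOT, "images"),
    sem_seg_root=os.path.join(_MY_ROOT, "sem_seg_png"),
    evaluator_type="sem_seg",
    ignore_label=255,
)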
upfusion3d/upfusion
upsrt/model/model.py
[ { "identifier": "ResNetConv", "path": "upsrt/model/resnet.py", "snippet": "class ResNetConv(nn.Module):\n def __init__(self, n_blocks=3, use_feature_pyramid=False, num_patches_x=None, num_patches_y=None):\n super(ResNetConv, self).__init__()\n self.resnet = resnet18(pretrained=True)\n self.n_blocks = n_blocks\n self.use_feature_pyramid = use_feature_pyramid\n self.num_patches_x = num_patches_x\n self.num_patches_y = num_patches_y\n\n def forward(self, x):\n n_blocks = self.n_blocks\n x = self.resnet.conv1(x)\n x = self.resnet.bn1(x)\n x = self.resnet.relu(x)\n x = self.resnet.maxpool(x)\n out = x\n\n if n_blocks >= 1:\n x = self.resnet.layer1(x) # (B, C, H/2, W/2)\n if self.use_feature_pyramid:\n out = F.interpolate(x, size=(self.num_patches_y, self.num_patches_x), mode='bilinear') # (B,C1,Py,Px)\n else:\n out = x\n if n_blocks >= 2:\n x = self.resnet.layer2(x)\n if self.use_feature_pyramid:\n x = F.interpolate(x, size=(self.num_patches_y, self.num_patches_x), mode='bilinear') # (B,C2,Py,Px)\n out = torch.cat([out, x], dim=1) # (B,C1+C2,Py,Px)\n else:\n out = x\n if n_blocks >= 3:\n x = self.resnet.layer3(x)\n if self.use_feature_pyramid:\n x = F.interpolate(x, size=(self.num_patches_y, self.num_patches_x), mode='bilinear') # (B,C3,Py,Px)\n out = torch.cat([out, x], dim=1) # (B,C1+C2+C3,Py,Px)\n else:\n out = x\n if n_blocks >= 4:\n x = self.resnet.layer4(x)\n if self.use_feature_pyramid:\n x = F.interpolate(x, size=(self.num_patches_y, self.num_patches_x), mode='bilinear') # (B,C,Py,Px)\n out = torch.cat([out, x], dim=1) # (B,4C,Py,Px)\n else:\n out = x\n\n return out" }, { "identifier": "TransformerEncoder", "path": "upsrt/model/transformer.py", "snippet": "class TransformerEncoder(nn.Module):\n def __init__(self, encoder_layer, num_layers):\n super().__init__()\n self.layers = _get_clones(encoder_layer, num_layers)\n self.num_layers = num_layers\n\n def forward(self, x):\n for mod in self.layers:\n x = mod(x)\n return x" }, { "identifier": "TransformerEncoderBlock", "path": "upsrt/model/transformer.py", "snippet": "class TransformerEncoderBlock(nn.Module):\n\n def __init__(\n self, attn_type, d_model, nhead, dim_feedforward=1024, dropout=0.1, activation=F.relu,\n layer_norm_eps=1e-5, device=None, dtype=None\n ):\n\n super().__init__()\n factory_kwargs = {'device': device, 'dtype': dtype}\n\n self.d_model = d_model\n self.nhead = nhead\n\n if attn_type == \"default\":\n self._default_mha = nn.MultiheadAttention(\n d_model, nhead, dropout=dropout, batch_first=True,\n **factory_kwargs\n )\n self.self_attn = self._default_mha_wrapper\n\n elif attn_type == \"xformers\":\n self.to_q = nn.Linear(d_model, d_model, bias = False)\n self.to_k = nn.Linear(d_model, d_model, bias = False)\n self.to_v = nn.Linear(d_model, d_model, bias = False)\n self.to_out = nn.Linear(d_model, d_model, bias = False)\n self.self_attn = self._xformers_mha_wrapper\n\n else:\n raise ValueError(f\"Unsupported attn_type: {attn_type}\")\n\n # Implementation of Feedforward model\n self.linear1 = nn.Linear(d_model, dim_feedforward, **factory_kwargs)\n self.dropout = nn.Dropout(dropout)\n self.linear2 = nn.Linear(dim_feedforward, d_model, **factory_kwargs)\n\n self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)\n self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n\n # Legacy string support for activation function.\n if isinstance(activation, str):\n self.activation = _get_activation_fn(activation)\n 
else:\n self.activation = activation\n\n def __setstate__(self, state):\n if 'activation' not in state:\n state['activation'] = F.relu\n super(TransformerEncoderBlock, self).__setstate__(state)\n\n def forward(self, x):\n x = x + self._sa_block(self.norm1(x))\n x = x + self._ff_block(self.norm2(x))\n return x\n\n def _sa_block(self, x):\n x = self.self_attn(q=x, k=x, v=x)\n x = self.dropout1(x)\n return x\n\n def _ff_block(self, x):\n x = self.dropout2(self.linear2(self.dropout(self.activation(self.linear1(x)))))\n return x\n\n def _default_mha_wrapper(self, q, k, v):\n\n output = self._default_mha(\n query=q, key=k, value=v,\n attn_mask=None, key_padding_mask=None, need_weights=False\n )[0]\n return output\n\n def _xformers_mha_wrapper(self, q, k, v):\n\n q = self.to_q(q)\n k = self.to_k(k)\n v = self.to_v(v)\n\n q = self._split_heads(q)\n k = self._split_heads(k)\n v = self._split_heads(v)\n output = memory_efficient_attention(query=q, key=k, value=v)\n output = self._join_heads(output)\n\n output = self.to_out(output)\n return output\n\n def _split_heads(self, x):\n s = x.shape\n return torch.reshape(x, (s[0], s[1], self.nhead, -1))\n\n def _join_heads(self, x):\n s = x.shape\n return torch.reshape(x, (s[0], s[1], s[2]*s[3]))" }, { "identifier": "TransformerDecoder", "path": "upsrt/model/transformer.py", "snippet": "class TransformerDecoder(nn.Module):\n def __init__(self, decoder_layer, num_layers):\n super().__init__()\n self.layers = _get_clones(decoder_layer, num_layers)\n self.num_layers = num_layers\n\n def forward(self, x, memory):\n for mod in self.layers:\n x = mod(x, memory)\n return x" }, { "identifier": "TransformerDecoderBlock", "path": "upsrt/model/transformer.py", "snippet": "class TransformerDecoderBlock(nn.Module):\n\n def __init__(\n self, attn_type, d_model, nhead, dim_feedforward=1024, dropout=0.1, activation=F.relu,\n layer_norm_eps=1e-5, device=None, dtype=None\n ):\n\n super().__init__()\n factory_kwargs = {'device': device, 'dtype': dtype}\n\n self.d_model = d_model\n self.nhead = nhead\n\n if attn_type == \"default\":\n self._default_mha = nn.MultiheadAttention(\n d_model, nhead, dropout=dropout, batch_first=True,\n **factory_kwargs\n )\n self.cross_attn = self._default_mha_wrapper\n\n elif attn_type == \"xformers\":\n self.to_q = nn.Linear(d_model, d_model, bias = False)\n self.to_k = nn.Linear(d_model, d_model, bias = False)\n self.to_v = nn.Linear(d_model, d_model, bias = False)\n self.to_out = nn.Linear(d_model, d_model, bias = False)\n self.cross_attn = self._xformers_mha_wrapper\n\n else:\n raise ValueError(f\"Unsupported attn_type: {attn_type}\")\n\n # Implementation of Feedforward model\n self.linear1 = nn.Linear(d_model, dim_feedforward, **factory_kwargs)\n self.dropout = nn.Dropout(dropout)\n self.linear2 = nn.Linear(dim_feedforward, d_model, **factory_kwargs)\n\n self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)\n self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n\n # Legacy string support for activation function.\n if isinstance(activation, str):\n self.activation = _get_activation_fn(activation)\n else:\n self.activation = activation\n\n def __setstate__(self, state):\n if 'activation' not in state:\n state['activation'] = F.relu\n super(TransformerDecoderBlock, self).__setstate__(state)\n\n def forward(self, x, memory):\n x = x + self._ca_block(self.norm1(x), memory)\n x = x + self._ff_block(self.norm2(x))\n return x\n\n def 
_ca_block(self, x, memory):\n x = self.cross_attn(q=x, k=memory, v=memory)\n x = self.dropout1(x)\n return x\n\n def _ff_block(self, x):\n x = self.dropout2(self.linear2(self.dropout(self.activation(self.linear1(x)))))\n return x\n\n def _default_mha_wrapper(self, q, k, v):\n\n output = self._default_mha(\n query=q, key=k, value=v,\n attn_mask=None, key_padding_mask=None, need_weights=False\n )[0]\n return output\n\n def _xformers_mha_wrapper(self, q, k, v):\n\n q = self.to_q(q)\n k = self.to_k(k)\n v = self.to_v(v)\n\n q = self._split_heads(q)\n k = self._split_heads(k)\n v = self._split_heads(v)\n output = memory_efficient_attention(query=q, key=k, value=v)\n output = self._join_heads(output)\n\n output = self.to_out(output)\n return output\n\n def _split_heads(self, x):\n s = x.shape\n return torch.reshape(x, (s[0], s[1], self.nhead, -1))\n\n def _join_heads(self, x):\n s = x.shape\n return torch.reshape(x, (s[0], s[1], s[2]*s[3]))" }, { "identifier": "plucker_dist", "path": "upsrt/model/utils.py", "snippet": "def plucker_dist(ray1, ray2, eps=1e-6):\n # Plucker ray is represented as (l, m),\n # l is direction unit norm, m = (oxl)\n\n # ray1 (l1, m1): (B, Q, 6)\n # ray2 (l2, m2): (B, P, 6)\n\n Q = ray1.shape[1]\n P = ray2.shape[1]\n\n ray1 = ray1.unsqueeze(2).repeat(1, 1, P, 1) # (B, Q, P, 6)\n ray2 = ray2.unsqueeze(1).repeat(1, Q, 1, 1) # (B, Q, P, 6)\n\n # (l1, m1) * (l2, m2) = l1.m2 + l2.m1\n reci_prod = ((ray1[..., :3] * ray2[..., 3:]).sum(-1) + \\\n (ray1[..., 3:] * ray2[..., :3]).sum(-1)).abs() # (B, Q, P)\n\n # || l1 x l2 ||\n l1_cross_l2 = torch.cross(ray1[..., :3], ray2[..., :3], dim=-1) # (B, Q, P, 3)\n l1_cross_l2_norm = l1_cross_l2.norm(dim=-1) # (B, Q, P)\n\n # || l1 x (m1-m2)/s ||\n # s = ray2[..., :3] / ray1[..., :3] # (B, Q, P, 3)\n # s = s.mean(dim=-1).unsqueeze(-1) # (B, Q, P, 1)\n s = 1\n l1_cross_m1_minus_m2 = torch.cross(ray1[..., :3], (ray1[..., 3:] - ray2[..., 3:])/s)\n l1_cross_m1_minus_m2_norm = l1_cross_m1_minus_m2.norm(dim=-1) # (B, Q, P)\n\n # ||l1||^2\n l1_norm_sq = torch.norm(ray1[..., :3], dim=-1) ** 2 # (B, Q, P)\n\n distance = l1_cross_m1_minus_m2_norm / (l1_norm_sq + eps) # (B, Q, P)\n mask = (l1_cross_l2_norm > eps)\n distance[mask] = reci_prod[mask] / (l1_cross_l2_norm[mask] + eps)\n\n return distance" }, { "identifier": "transform_rays", "path": "upsrt/model/utils.py", "snippet": "def transform_rays(reference_R, reference_T, rays):\n '''\n PyTorch3D Convention is used: X_cam = X_world @ R + T\n\n Args:\n reference_R: world2cam rotation matrix for reference camera (B, 3, 3)\n reference_T: world2cam translation vector for reference camera (B, 3)\n rays: (origin, direction) defined in world reference frame (B, V, N, 6)\n Returns:\n torch.Tensor: Transformed rays w.r.t. 
reference camera (B, V, N, 6)\n '''\n batch, num_views, num_rays, ray_dim = rays.shape\n assert ray_dim == 6, \\\n 'First 3 dimensions should be origin; Last 3 dimensions should be direction'\n\n rays = rays.reshape(batch, num_views*num_rays, ray_dim)\n rays_out = rays.clone()\n rays_out[..., :3] = torch.bmm(rays[..., :3], reference_R) + reference_T.unsqueeze(-2)\n rays_out[..., 3:] = torch.bmm(rays[..., 3:], reference_R)\n rays_out = rays_out.reshape(batch, num_views, num_rays, ray_dim)\n return rays_out" }, { "identifier": "get_grid_rays", "path": "upsrt/renderer/rays.py", "snippet": "def get_grid_rays(cameras_list, image_size, min_x, min_y, max_x, max_y, device):\n \"\"\"Returns rays in a grid (one per pixel) parameterized as a 6-D vector (origin: x, y, z | direction: a, b, c).\n\n Args:\n cameras_list(list[pytorch3d.renderer.cameras.CamerasBase]): List of Pytorch3D cameras of length (n_cameras,).\n image_size(tuple[int, int]): Size of the image in pixels (height, width).\n device(torch.device): The device on which the generated rays must be cast.\n\n Returns:\n tuple[torch.Tensor, torch.Tensor]: Tuple containing:\n 1) grid_rays(torch.Tensor): Tensor of shape (n_cameras, H * W, 6) denoting the encoded rays (where the\n last dimension corresponds to the 6-D parameterized representation\n (origin | direction),\n 2) xys(torch.Tensor): Tensor of shape (n_cameras, H * W, 2) denoting the NDC coordinates of the\n point in the image through which the corresponding ray passes.\n \"\"\"\n # Obtain grid raybundle: Each element in the following list corresponds to a raybundle whose attributes (origins, xys, directions, lengths) are\n # tensors of shape (1, H*W, d) where d is the dimensionality of the quantity.\n grid_rays = [\n get_grid_raybundle(camera, image_size=image_size, min_x=min_x, min_y=min_y, max_x=max_x, max_y=max_y)\n for camera in cameras_list\n ]\n\n # Concatenate xys (along the batch dimension) to create a single tensor\n xys = [grid_ray.xys for grid_ray in grid_rays] # grid_rays.xys -> (1, H*W, 2)\n xys = torch.cat(xys, dim=0) # (n_cameras, H*W, 2)\n\n # Concatenate origins and directions to create a single tensor\n # The final rays are represented as the 6-dimensional representation (origin|direction)\n grid_rays = [torch.cat((grid_ray.origins, grid_ray.directions), dim=-1) for grid_ray in grid_rays]\n grid_rays = torch.cat(grid_rays, dim=0).to(device) # (n_cameras, H*W, 6)\n\n return grid_rays, xys" }, { "identifier": "get_patch_rays", "path": "upsrt/renderer/rays.py", "snippet": "def get_patch_rays(cameras_list, num_patches_x, num_patches_y, device):\n \"\"\"Returns patch rays given the camera viewpoints\n\n Args:\n cameras_list(list[pytorch3d.renderer.cameras.BaseCameras]): List of list of cameras (len (batch_size, num_input_views,))\n num_patches_x: Number of patches in the x-direction (horizontal)\n num_patches_y: Number of patches in the y-direction (vertical)\n\n Returns:\n torch.tensor: Patch rays of shape (batch_size, num_views, num_patches, 6)\n \"\"\"\n batch, numviews = len(cameras_list), len(cameras_list[0])\n cameras_list = [cam for cam_batch in cameras_list for cam in cam_batch] # Flatten the cameras list\n patch_rays = [\n get_patch_raybundle(camera, num_patches_y=num_patches_y, num_patches_x=num_patches_x)\n for camera in cameras_list\n ]\n\n # list of len (B * num_views), having (1, P, 6) tensors\n patch_rays = [torch.cat((patch_ray.origins, patch_ray.directions), dim=-1) for patch_ray in patch_rays]\n\n # patch_ray.origins -> (1, P, 3), patch_ray.directions -> 
(1, P, 3)\n patch_rays = torch.cat(patch_rays, dim=0) # (B * num_views, P, 6)\n\n patch_rays = patch_rays.reshape(batch, numviews, num_patches_x * num_patches_y, 6).to(device)\n return patch_rays" }, { "identifier": "get_plucker_parameterization", "path": "upsrt/renderer/rays.py", "snippet": "def get_plucker_parameterization(ray):\n \"\"\"Returns the plucker representation of the rays given the (origin, direction) representation\n\n Args:\n ray(torch.Tensor): Tensor of shape (..., 6) with the (origin, direction) representation\n\n Returns:\n torch.Tensor: Tensor of shape (..., 6) with the plucker (D, OxD) representation\n \"\"\"\n ray = ray.clone() # Create a clone\n ray_origins = ray[..., :3]\n ray_directions = ray[..., 3:]\n ray_directions = ray_directions / ray_directions.norm(dim=-1).unsqueeze(-1) # Normalize ray directions to unit vectors\n plucker_normal = torch.cross(ray_origins, ray_directions, dim=-1)\n plucker_parameterization = torch.cat([ray_directions, plucker_normal], dim=-1)\n\n return plucker_parameterization" }, { "identifier": "get_random_query_pixel_rays", "path": "upsrt/renderer/rays.py", "snippet": "def get_random_query_pixel_rays(\n cameras_list, num_pixel_queries, query_ray_filter, min_x,\n min_y, max_x, max_y, device, return_xys=False,\n):\n \"\"\"Returns query rays given the camera viewpoints\n\n Args:\n cameras_list(list[pytorch3d.renderer.cameras.BaseCameras]): List of len (batch_size,) containing query cameras\n num_pixel_queries(int): Number of pixel queries\n query_ray_filter(torch.Tensor|None): A tensor of shape (B, H, W) containing batch of masks within which the rays should be sampled. If None, considers the limits to be (+1, -1).\n\n Returns:\n torch.tensor: Query rays of shape (batch_size, num_pixel_queries, 6)\n \"\"\"\n\n B = len(cameras_list)\n\n # If query ray filter (mask) is provided, then obtain X&Y limits from the mask\n if query_ray_filter is None:\n min_x_list, min_y_list, max_x_list, max_y_list = [min_x]*B, [min_y]*B, [max_x]*B, [max_y]*B\n else:\n min_x_list, min_y_list, max_x_list, max_y_list = process_query_ray_filter(query_ray_filter)\n\n random_query_rays = [\n get_random_raybundle(camera, num_rays=num_pixel_queries, min_x=min_x, min_y=min_y, max_x=max_x, max_y=max_y)\n for camera, min_x, min_y, max_x, max_y in zip(cameras_list, min_x_list, min_y_list, max_x_list, max_y_list)\n ]\n xys = [query_ray.xys for query_ray in random_query_rays]\n # query_ray.xys -> (1, num_queries, 2)\n xys = torch.cat(xys, dim=0).to(device) # (batch_size, num_queries, 2)\n\n random_query_rays = [torch.cat((patch_ray.origins, patch_ray.directions), dim=-1) for patch_ray in random_query_rays]\n random_query_rays = torch.cat(random_query_rays, dim=0).to(device) # (batch_size, num_queries, 6)\n\n assert random_query_rays.shape == (len(xys), num_pixel_queries, 6)\n\n if return_xys:\n return random_query_rays, xys\n else:\n return random_query_rays" }, { "identifier": "positional_encoding", "path": "upsrt/renderer/rays.py", "snippet": "def positional_encoding(ray, n_freqs=10, parameterize=None, start_freq=0):\n \"\"\"\n Positional Embeddings. For more details see Section 5.1 of\n NeRFs: https://arxiv.org/pdf/2003.08934.pdf\n\n Args:\n ray: (B,num_input_views,P,6)\n n_freqs: num of frequency bands\n parameterize(str|None): Parameterization used for rays. Recommended: use 'plucker'. 
Default=None.\n\n Returns:\n pos_embeddings: Mapping input ray from R to R^{2*n_freqs}.\n \"\"\"\n\n if parameterize is None:\n pass\n elif parameterize == 'plucker':\n # direction unit-normalized, (o+nd, d) has same representation as (o+md, d) [4 DOF]\n # ray_origins = ray[..., :3]\n # ray_directions = ray[..., 3:]\n # ray_directions = ray_directions / ray_directions.norm(dim=-1).unsqueeze(-1) # Normalize ray directions to unit vectors\n # plucker_normal = torch.cross(ray_origins, ray_directions, dim=-1)\n # plucker_parameterization = torch.cat([ray_directions, plucker_normal], dim=-1)\n ray = get_plucker_parameterization(ray)\n else:\n raise NotImplementedError(f'parameterize={parameterize} not implemented.')\n\n freq_bands = 2. ** torch.arange(start_freq, start_freq+n_freqs) * np.pi\n sin_encodings = [torch.sin(ray * freq) for freq in freq_bands]\n cos_encodings = [torch.cos(ray * freq) for freq in freq_bands]\n\n pos_embeddings = torch.cat(sin_encodings + cos_encodings, dim=-1) # B, num_input_views, P, 6 * 2n_freqs\n return pos_embeddings" }, { "identifier": "get_grid_rays_gpu", "path": "upsrt/renderer/rays.py", "snippet": "def get_grid_rays_gpu(cameras_list, image_size, min_x, min_y, max_x, max_y):\n \"\"\"Returns rays in a grid (one per pixel) parameterized as a 6-D vector (origin: x, y, z | direction: a, b, c).\n\n Args:\n cameras_list(list[pytorch3d.renderer.cameras.CamerasBase]): List of Pytorch3D cameras of length (n_cameras,).\n image_size(tuple[int, int]): Size of the image in pixels (height, width).\n device(torch.device): The device on which the generated rays must be cast.\n\n Returns:\n tuple[torch.Tensor, torch.Tensor]: Tuple containing:\n 1) grid_rays(torch.Tensor): Tensor of shape (n_cameras, H * W, 6) denoting the encoded rays (where the\n last dimension corresponds to the 6-D parameterized representation\n (origin | direction),\n 2) xys(torch.Tensor): Tensor of shape (n_cameras, H * W, 2) denoting the NDC coordinates of the\n point in the image through which the corresponding ray passes.\n \"\"\"\n # Obtain grid raybundle: Each element in the following list corresponds to a raybundle whose attributes (origins, xys, directions, lengths) are\n # tensors of shape (1, H*W, d) where d is the dimensionality of the quantity.\n grid_rays = [\n get_grid_raybundle_gpu(camera, image_size=image_size, min_x=min_x, min_y=min_y, max_x=max_x, max_y=max_y)\n for camera in cameras_list\n ]\n\n # Concatenate xys (along the batch dimension) to create a single tensor\n xys = [grid_ray.xys for grid_ray in grid_rays] # grid_rays.xys -> (1, H*W, 2)\n xys = torch.cat(xys, dim=0) # (n_cameras, H*W, 2)\n\n # Concatenate origins and directions to create a single tensor\n # The final rays are represented as the 6-dimensional representation (origin|direction)\n grid_rays = [torch.cat((grid_ray.origins, grid_ray.directions), dim=-1) for grid_ray in grid_rays]\n grid_rays = torch.cat(grid_rays, dim=0) # (n_cameras, H*W, 6)\n\n return grid_rays, xys" }, { "identifier": "create_patch_id_encoding", "path": "upsrt/utils/id_encoding.py", "snippet": "def create_patch_id_encoding(img_features_shape, num_patches, n_freqs, start_freq):\r\n \"\"\"\r\n TODO: Elaborate\r\n \"\"\"\r\n b, n_inp = img_features_shape[:2]\r\n patch_ids_list = [\r\n torch.full((b*n_inp, 1, 1), i/num_patches, dtype=torch.float32)\r\n for i in range(1, num_patches+1)\r\n ]\r\n patch_ids = torch.cat(patch_ids_list, dim=1)\r\n pos_encoded_patch_ids = positionally_encode_ids(patch_ids, n_freqs, start_freq)\r\n pos_encoded_patch_ids = 
pos_encoded_patch_ids.reshape(b, n_inp, num_patches, 2*n_freqs)\r\n\r\n return pos_encoded_patch_ids\r" }, { "identifier": "create_camera_id_encoding", "path": "upsrt/utils/id_encoding.py", "snippet": "def create_camera_id_encoding(img_features_shape, num_patches, n_freqs, start_freq):\r\n \"\"\"\r\n TODO: Elaborate\r\n \"\"\"\r\n b, n_inp = img_features_shape[:2]\r\n canonical_camera_id = torch.full((b, 1, num_patches, 1), 1/3 + 0.05, dtype=torch.float32)\r\n other_camera_id = torch.full((b, n_inp-1, num_patches, 1), 2/3 + 0.05, dtype=torch.float32)\r\n camera_ids = torch.cat((canonical_camera_id, other_camera_id), dim=1)\r\n pos_encoded_camera_ids = positionally_encode_ids(camera_ids, n_freqs, start_freq)\r\n\r\n return pos_encoded_camera_ids\r" } ]
import math
import torch
import torch.nn as nn
from upsrt.model.resnet import ResNetConv
from upsrt.model.transformer import (
    TransformerEncoder, TransformerEncoderBlock, TransformerDecoder, TransformerDecoderBlock
)
from upsrt.model.utils import plucker_dist, transform_rays
from upsrt.renderer.rays import (
    get_grid_rays, get_patch_rays, get_plucker_parameterization,
    get_random_query_pixel_rays, positional_encoding, get_grid_rays_gpu
)
from pytorch3d.renderer.cameras import PerspectiveCameras
from upsrt.utils.id_encoding import create_patch_id_encoding, create_camera_id_encoding
7,295
ATTN_TYPE = "xformers" class SceneEncoder(nn.Module): """ Takes set of patch-wise image and ray features as input and computes a set latent encoding for the scene """ def __init__(self, cfg): super(SceneEncoder, self).__init__() # Transformer architecture params self.transformer_dim = cfg.transformer_dim self.encoder_hidden_activation = 'gelu' self.encoder_n_attention_heads = 12 self.encoder_num_layers = cfg.num_encoder_layers self.transformer_encoder = TransformerEncoder( encoder_layer = TransformerEncoderBlock( attn_type=ATTN_TYPE, d_model=self.transformer_dim, nhead=self.encoder_n_attention_heads, activation=self.encoder_hidden_activation ), num_layers = self.encoder_num_layers ) def forward(self, scene_features): """ Args: scene_features: (b, n_inp, patch, transformer_dim) src_mask(torch.Tensor): FloatTensor (additive mask) of shape (b * n_heads, n_inp * patch, n_inp * patch) Returns: torch.Tensor: Tensor of shape (n_inp*patch, b, d_model) representing scene latent encoding """ b, n_inp, n_patch, _ = scene_features.shape encoder_input = torch.reshape(scene_features, (b, n_inp * n_patch, self.transformer_dim)) # (b, n_inp*patch, d_model) scene_encoding = self.transformer_encoder(encoder_input) # (b, n_inp*patch, d_model) return scene_encoding class RayDecoder(nn.Module): """ Decodes color value for each query pixel ray using a set latent encoding """ def __init__(self, cfg): super(RayDecoder, self).__init__() # Transformer architecture params self.transformer_dim = cfg.transformer_dim self.decoder_hidden_activation = 'gelu' self.decoder_n_attention_heads = 12 self.decoder_num_layers = cfg.num_decoder_layers self.transformer_decoder = TransformerDecoder(
ATTN_TYPE = "xformers" class SceneEncoder(nn.Module): """ Takes set of patch-wise image and ray features as input and computes a set latent encoding for the scene """ def __init__(self, cfg): super(SceneEncoder, self).__init__() # Transformer architecture params self.transformer_dim = cfg.transformer_dim self.encoder_hidden_activation = 'gelu' self.encoder_n_attention_heads = 12 self.encoder_num_layers = cfg.num_encoder_layers self.transformer_encoder = TransformerEncoder( encoder_layer = TransformerEncoderBlock( attn_type=ATTN_TYPE, d_model=self.transformer_dim, nhead=self.encoder_n_attention_heads, activation=self.encoder_hidden_activation ), num_layers = self.encoder_num_layers ) def forward(self, scene_features): """ Args: scene_features: (b, n_inp, patch, transformer_dim) src_mask(torch.Tensor): FloatTensor (additive mask) of shape (b * n_heads, n_inp * patch, n_inp * patch) Returns: torch.Tensor: Tensor of shape (n_inp*patch, b, d_model) representing scene latent encoding """ b, n_inp, n_patch, _ = scene_features.shape encoder_input = torch.reshape(scene_features, (b, n_inp * n_patch, self.transformer_dim)) # (b, n_inp*patch, d_model) scene_encoding = self.transformer_encoder(encoder_input) # (b, n_inp*patch, d_model) return scene_encoding class RayDecoder(nn.Module): """ Decodes color value for each query pixel ray using a set latent encoding """ def __init__(self, cfg): super(RayDecoder, self).__init__() # Transformer architecture params self.transformer_dim = cfg.transformer_dim self.decoder_hidden_activation = 'gelu' self.decoder_n_attention_heads = 12 self.decoder_num_layers = cfg.num_decoder_layers self.transformer_decoder = TransformerDecoder(
decoder_layer = TransformerDecoderBlock(
4
2023-12-12 00:49:11+00:00
12k
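The UpSRT record leans on two small ray utilities, get_plucker_parameterization and positional_encoding, whose behavior is spelled out in their docstrings: rays given as (origin | direction) are mapped to the Plucker form (d, o x d) with a unit direction, then each coordinate is lifted to 2*n_freqs sin/cos features. The standalone sketch below re-derives both for illustration only; the shapes and frequency layout follow the docstrings, while the toy random rays and function names are invented for the example.

import numpy as np
import torch

def plucker(rays: torch.Tensor) -> torch.Tensor:
    """(..., 6) rays as (origin | direction) -> (..., 6) Plucker rays (d, o x d)."""
    o, d = rays[..., :3], rays[..., 3:]
    d = d / d.norm(dim=-1, keepdim=True)   # unit-normalize the direction
    m = torch.cross(o, d, dim=-1)          # moment vector o x d
    return torch.cat([d, m], dim=-1)

def posenc(x: torch.Tensor, n_freqs: int = 10) -> torch.Tensor:
    """NeRF-style encoding: each coordinate becomes 2*n_freqs sin/cos features."""
    freqs = (2.0 ** torch.arange(n_freqs, dtype=x.dtype)) * np.pi
    sin = [torch.sin(x * f) for f in freqs]
    cos = [torch.cos(x * f) for f in freqs]
    return torch.cat(sin + cos, dim=-1)

# Toy usage: one batch, one view, four rays of dimension 6.
rays = torch.randn(1, 1, 4, 6)
enc = posenc(plucker(rays), n_freqs=10)    # -> shape (1, 1, 4, 6 * 2 * 10)
print(enc.shape)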
modelscope/normal-depth-diffusion
libs/omnidata_torch/lib/zoe/zoedepth/models/zoedepth/zoedepth_v1.py
[ { "identifier": "DepthModel", "path": "libs/omnidata_torch/lib/zoe/zoedepth/models/depth_model.py", "snippet": "class DepthModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.device = 'cpu'\n \n def to(self, device) -> nn.Module:\n self.device = device\n return super().to(device)\n \n def forward(self, x, *args, **kwargs):\n raise NotImplementedError\n \n def _infer(self, x: torch.Tensor):\n \"\"\"\n Inference interface for the model\n Args:\n x (torch.Tensor): input tensor of shape (b, c, h, w)\n Returns:\n torch.Tensor: output tensor of shape (b, 1, h, w)\n \"\"\"\n return self(x)['metric_depth']\n \n def _infer_with_pad_aug(self, x: torch.Tensor, pad_input: bool=True, fh: float=3, fw: float=3, upsampling_mode: str='bicubic', padding_mode=\"reflect\", **kwargs) -> torch.Tensor:\n \"\"\"\n Inference interface for the model with padding augmentation\n Padding augmentation fixes the boundary artifacts in the output depth map.\n Boundary artifacts are sometimes caused by the fact that the model is trained on NYU raw dataset which has a black or white border around the image.\n This augmentation pads the input image and crops the prediction back to the original size / view.\n\n Note: This augmentation is not required for the models trained with 'avoid_boundary'=True.\n Args:\n x (torch.Tensor): input tensor of shape (b, c, h, w)\n pad_input (bool, optional): whether to pad the input or not. Defaults to True.\n fh (float, optional): height padding factor. The padding is calculated as sqrt(h/2) * fh. Defaults to 3.\n fw (float, optional): width padding factor. The padding is calculated as sqrt(w/2) * fw. Defaults to 3.\n upsampling_mode (str, optional): upsampling mode. Defaults to 'bicubic'.\n padding_mode (str, optional): padding mode. Defaults to \"reflect\".\n Returns:\n torch.Tensor: output tensor of shape (b, 1, h, w)\n \"\"\"\n # assert x is nchw and c = 3\n assert x.dim() == 4, \"x must be 4 dimensional, got {}\".format(x.dim())\n assert x.shape[1] == 3, \"x must have 3 channels, got {}\".format(x.shape[1])\n\n if pad_input:\n assert fh > 0 or fw > 0, \"atlease one of fh and fw must be greater than 0\"\n pad_h = int(np.sqrt(x.shape[2]/2) * fh)\n pad_w = int(np.sqrt(x.shape[3]/2) * fw)\n padding = [pad_w, pad_w]\n if pad_h > 0:\n padding += [pad_h, pad_h]\n \n x = F.pad(x, padding, mode=padding_mode, **kwargs)\n out = self._infer(x)\n if out.shape[-2:] != x.shape[-2:]:\n out = F.interpolate(out, size=(x.shape[2], x.shape[3]), mode=upsampling_mode, align_corners=False)\n if pad_input:\n # crop to the original size, handling the case where pad_h and pad_w is 0\n if pad_h > 0:\n out = out[:, :, pad_h:-pad_h,:]\n if pad_w > 0:\n out = out[:, :, :, pad_w:-pad_w]\n return out\n \n def infer_with_flip_aug(self, x, pad_input: bool=True, **kwargs) -> torch.Tensor:\n \"\"\"\n Inference interface for the model with horizontal flip augmentation\n Horizontal flip augmentation improves the accuracy of the model by averaging the output of the model with and without horizontal flip.\n Args:\n x (torch.Tensor): input tensor of shape (b, c, h, w)\n pad_input (bool, optional): whether to use padding augmentation. 
Defaults to True.\n Returns:\n torch.Tensor: output tensor of shape (b, 1, h, w)\n \"\"\"\n # infer with horizontal flip and average\n out = self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs)\n out_flip = self._infer_with_pad_aug(torch.flip(x, dims=[3]), pad_input=pad_input, **kwargs)\n out = (out + torch.flip(out_flip, dims=[3])) / 2\n return out\n \n def infer(self, x, pad_input: bool=True, with_flip_aug: bool=True, **kwargs) -> torch.Tensor:\n \"\"\"\n Inference interface for the model\n Args:\n x (torch.Tensor): input tensor of shape (b, c, h, w)\n pad_input (bool, optional): whether to use padding augmentation. Defaults to True.\n with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True.\n Returns:\n torch.Tensor: output tensor of shape (b, 1, h, w)\n \"\"\"\n if with_flip_aug:\n return self.infer_with_flip_aug(x, pad_input=pad_input, **kwargs)\n else:\n return self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs)\n \n @torch.no_grad()\n def infer_pil(self, pil_img, pad_input: bool=True, with_flip_aug: bool=True, output_type: str=\"numpy\", **kwargs) -> Union[np.ndarray, PIL.Image.Image, torch.Tensor]:\n \"\"\"\n Inference interface for the model for PIL image\n Args:\n pil_img (PIL.Image.Image): input PIL image\n pad_input (bool, optional): whether to use padding augmentation. Defaults to True.\n with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True.\n output_type (str, optional): output type. Supported values are 'numpy', 'pil' and 'tensor'. Defaults to \"numpy\".\n \"\"\"\n x = transforms.ToTensor()(pil_img).unsqueeze(0).to(self.device)\n out_tensor = self.infer(x, pad_input=pad_input, with_flip_aug=with_flip_aug, **kwargs)\n if output_type == \"numpy\":\n return out_tensor.squeeze().cpu().numpy()\n elif output_type == \"pil\":\n # uint16 is required for depth pil image\n out_16bit_numpy = (out_tensor.squeeze().cpu().numpy()*256).astype(np.uint16)\n return Image.fromarray(out_16bit_numpy)\n elif output_type == \"tensor\":\n return out_tensor.squeeze().cpu()\n else:\n raise ValueError(f\"output_type {output_type} not supported. Supported values are 'numpy', 'pil' and 'tensor'\")" }, { "identifier": "MidasCore", "path": "libs/omnidata_torch/lib/zoe/zoedepth/models/base_models/midas.py", "snippet": "class MidasCore(nn.Module):\n def __init__(self, midas, trainable=False, fetch_features=True, layer_names=('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1'), freeze_bn=False, keep_aspect_ratio=True,\n img_size=384, **kwargs):\n \"\"\"Midas Base model used for multi-scale feature extraction.\n\n Args:\n midas (torch.nn.Module): Midas model.\n trainable (bool, optional): Train midas model. Defaults to False.\n fetch_features (bool, optional): Extract multi-scale features. Defaults to True.\n layer_names (tuple, optional): Layers used for feature extraction. Order = (head output features, last layer features, ...decoder features). Defaults to ('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1').\n freeze_bn (bool, optional): Freeze BatchNorm. Generally results in better finetuning performance. Defaults to False.\n keep_aspect_ratio (bool, optional): Keep the aspect ratio of input images while resizing. Defaults to True.\n img_size (int, tuple, optional): Input resolution. 
Defaults to 384.\n \"\"\"\n super().__init__()\n self.core = midas\n self.output_channels = None\n self.core_out = {}\n self.trainable = trainable\n self.fetch_features = fetch_features\n # midas.scratch.output_conv = nn.Identity()\n self.handles = []\n # self.layer_names = ['out_conv','l4_rn', 'r4', 'r3', 'r2', 'r1']\n self.layer_names = layer_names\n\n self.set_trainable(trainable)\n self.set_fetch_features(fetch_features)\n\n self.prep = PrepForMidas(keep_aspect_ratio=keep_aspect_ratio,\n img_size=img_size, do_resize=kwargs.get('do_resize', True))\n\n if freeze_bn:\n self.freeze_bn()\n\n def set_trainable(self, trainable):\n self.trainable = trainable\n if trainable:\n self.unfreeze()\n else:\n self.freeze()\n return self\n\n def set_fetch_features(self, fetch_features):\n self.fetch_features = fetch_features\n if fetch_features:\n if len(self.handles) == 0:\n self.attach_hooks(self.core)\n else:\n self.remove_hooks()\n return self\n\n def freeze(self):\n for p in self.parameters():\n p.requires_grad = False\n self.trainable = False\n return self\n\n def unfreeze(self):\n for p in self.parameters():\n p.requires_grad = True\n self.trainable = True\n return self\n\n def freeze_bn(self):\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n return self\n\n def forward(self, x, denorm=False, return_rel_depth=False):\n with torch.no_grad():\n if denorm:\n x = denormalize(x)\n x = self.prep(x)\n # print(\"Shape after prep: \", x.shape)\n\n with torch.set_grad_enabled(self.trainable):\n\n # print(\"Input size to Midascore\", x.shape)\n rel_depth = self.core(x)\n # print(\"Output from midas shape\", rel_depth.shape)\n if not self.fetch_features:\n return rel_depth\n out = [self.core_out[k] for k in self.layer_names]\n\n if return_rel_depth:\n return rel_depth, out\n return out\n\n def get_rel_pos_params(self):\n for name, p in self.core.pretrained.named_parameters():\n if \"relative_position\" in name:\n yield p\n\n def get_enc_params_except_rel_pos(self):\n for name, p in self.core.pretrained.named_parameters():\n if \"relative_position\" not in name:\n yield p\n\n def freeze_encoder(self, freeze_rel_pos=False):\n if freeze_rel_pos:\n for p in self.core.pretrained.parameters():\n p.requires_grad = False\n else:\n for p in self.get_enc_params_except_rel_pos():\n p.requires_grad = False\n return self\n\n def attach_hooks(self, midas):\n if len(self.handles) > 0:\n self.remove_hooks()\n if \"out_conv\" in self.layer_names:\n self.handles.append(list(midas.scratch.output_conv.children())[\n 3].register_forward_hook(get_activation(\"out_conv\", self.core_out)))\n if \"r4\" in self.layer_names:\n self.handles.append(midas.scratch.refinenet4.register_forward_hook(\n get_activation(\"r4\", self.core_out)))\n if \"r3\" in self.layer_names:\n self.handles.append(midas.scratch.refinenet3.register_forward_hook(\n get_activation(\"r3\", self.core_out)))\n if \"r2\" in self.layer_names:\n self.handles.append(midas.scratch.refinenet2.register_forward_hook(\n get_activation(\"r2\", self.core_out)))\n if \"r1\" in self.layer_names:\n self.handles.append(midas.scratch.refinenet1.register_forward_hook(\n get_activation(\"r1\", self.core_out)))\n if \"l4_rn\" in self.layer_names:\n self.handles.append(midas.scratch.layer4_rn.register_forward_hook(\n get_activation(\"l4_rn\", self.core_out)))\n\n return self\n\n def remove_hooks(self):\n for h in self.handles:\n h.remove()\n return self\n\n def __del__(self):\n self.remove_hooks()\n\n def set_output_channels(self, model_type):\n 
self.output_channels = MIDAS_SETTINGS[model_type]\n\n @staticmethod\n def build(midas_model_type=\"DPT_BEiT_L_384\", train_midas=False, use_pretrained_midas=True, fetch_features=False, freeze_bn=True, force_keep_ar=False, force_reload=False, **kwargs):\n if midas_model_type not in MIDAS_SETTINGS:\n raise ValueError(\n f\"Invalid model type: {midas_model_type}. Must be one of {list(MIDAS_SETTINGS.keys())}\")\n if \"img_size\" in kwargs:\n kwargs = MidasCore.parse_img_size(kwargs)\n img_size = kwargs.pop(\"img_size\", [384, 384])\n print(\"img_size\", img_size)\n midas_path = os.path.join(os.path.dirname(__file__), 'midas_repo')\n midas = torch.hub.load(midas_path, midas_model_type,\n pretrained=use_pretrained_midas, force_reload=force_reload, source='local')\n kwargs.update({'keep_aspect_ratio': force_keep_ar})\n midas_core = MidasCore(midas, trainable=train_midas, fetch_features=fetch_features,\n freeze_bn=freeze_bn, img_size=img_size, **kwargs)\n midas_core.set_output_channels(midas_model_type)\n return midas_core\n\n @staticmethod\n def build_from_config(config):\n return MidasCore.build(**config)\n\n @staticmethod\n def parse_img_size(config):\n assert 'img_size' in config\n if isinstance(config['img_size'], str):\n assert \",\" in config['img_size'], \"img_size should be a string with comma separated img_size=H,W\"\n config['img_size'] = list(map(int, config['img_size'].split(\",\")))\n assert len(\n config['img_size']) == 2, \"img_size should be a string with comma separated img_size=H,W\"\n elif isinstance(config['img_size'], int):\n config['img_size'] = [config['img_size'], config['img_size']]\n else:\n assert isinstance(config['img_size'], list) and len(\n config['img_size']) == 2, \"img_size should be a list of H,W\"\n return config" }, { "identifier": "AttractorLayer", "path": "libs/omnidata_torch/lib/zoe/zoedepth/models/layers/attractor.py", "snippet": "class AttractorLayer(nn.Module):\n def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=1e-3, max_depth=10,\n alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False):\n \"\"\"\n Attractor layer for bin centers. Bin centers are bounded on the interval (min_depth, max_depth)\n \"\"\"\n super().__init__()\n\n self.n_attractors = n_attractors\n self.n_bins = n_bins\n self.min_depth = min_depth\n self.max_depth = max_depth\n self.alpha = alpha\n self.gamma = gamma\n self.kind = kind\n self.attractor_type = attractor_type\n self.memory_efficient = memory_efficient\n\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, n_attractors*2, 1, 1, 0), # x2 for linear norm\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False):\n \"\"\"\n Args:\n x (torch.Tensor) : feature block; shape - n, c, h, w\n b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w\n \n Returns:\n tuple(torch.Tensor,torch.Tensor) : new bin centers normed and scaled; shape - n, nbins, h, w\n \"\"\"\n if prev_b_embedding is not None:\n if interpolate:\n prev_b_embedding = nn.functional.interpolate(\n prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True)\n x = x + prev_b_embedding\n\n A = self._net(x)\n eps = 1e-3\n A = A + eps\n n, c, h, w = A.shape\n A = A.view(n, self.n_attractors, 2, h, w)\n A_normed = A / A.sum(dim=2, keepdim=True) # n, a, 2, h, w\n A_normed = A[:, :, 0, ...] 
# n, na, h, w\n\n b_prev = nn.functional.interpolate(\n b_prev, (h, w), mode='bilinear', align_corners=True)\n b_centers = b_prev\n\n if self.attractor_type == 'exp':\n dist = exp_attractor\n else:\n dist = inv_attractor\n\n if not self.memory_efficient:\n func = {'mean': torch.mean, 'sum': torch.sum}[self.kind]\n # .shape N, nbins, h, w\n delta_c = func(dist(A_normed.unsqueeze(\n 2) - b_centers.unsqueeze(1)), dim=1)\n else:\n delta_c = torch.zeros_like(b_centers, device=b_centers.device)\n for i in range(self.n_attractors):\n # .shape N, nbins, h, w\n delta_c += dist(A_normed[:, i, ...].unsqueeze(1) - b_centers)\n\n if self.kind == 'mean':\n delta_c = delta_c / self.n_attractors\n\n b_new_centers = b_centers + delta_c\n B_centers = (self.max_depth - self.min_depth) * \\\n b_new_centers + self.min_depth\n B_centers, _ = torch.sort(B_centers, dim=1)\n B_centers = torch.clip(B_centers, self.min_depth, self.max_depth)\n return b_new_centers, B_centers" }, { "identifier": "AttractorLayerUnnormed", "path": "libs/omnidata_torch/lib/zoe/zoedepth/models/layers/attractor.py", "snippet": "class AttractorLayerUnnormed(nn.Module):\n def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=1e-3, max_depth=10,\n alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False):\n \"\"\"\n Attractor layer for bin centers. Bin centers are unbounded\n \"\"\"\n super().__init__()\n\n self.n_attractors = n_attractors\n self.n_bins = n_bins\n self.min_depth = min_depth\n self.max_depth = max_depth\n self.alpha = alpha\n self.gamma = gamma\n self.kind = kind\n self.attractor_type = attractor_type\n self.memory_efficient = memory_efficient\n\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, n_attractors, 1, 1, 0),\n nn.Softplus()\n )\n\n def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False):\n \"\"\"\n Args:\n x (torch.Tensor) : feature block; shape - n, c, h, w\n b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w\n \n Returns:\n tuple(torch.Tensor,torch.Tensor) : new bin centers unbounded; shape - n, nbins, h, w. 
Two outputs just to keep the API consistent with the normed version\n \"\"\"\n if prev_b_embedding is not None:\n if interpolate:\n prev_b_embedding = nn.functional.interpolate(\n prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True)\n x = x + prev_b_embedding\n\n A = self._net(x)\n n, c, h, w = A.shape\n\n b_prev = nn.functional.interpolate(\n b_prev, (h, w), mode='bilinear', align_corners=True)\n b_centers = b_prev\n\n if self.attractor_type == 'exp':\n dist = exp_attractor\n else:\n dist = inv_attractor\n\n if not self.memory_efficient:\n func = {'mean': torch.mean, 'sum': torch.sum}[self.kind]\n # .shape N, nbins, h, w\n delta_c = func(\n dist(A.unsqueeze(2) - b_centers.unsqueeze(1)), dim=1)\n else:\n delta_c = torch.zeros_like(b_centers, device=b_centers.device)\n for i in range(self.n_attractors):\n delta_c += dist(A[:, i, ...].unsqueeze(1) -\n b_centers) # .shape N, nbins, h, w\n\n if self.kind == 'mean':\n delta_c = delta_c / self.n_attractors\n\n b_new_centers = b_centers + delta_c\n B_centers = b_new_centers\n\n return b_new_centers, B_centers" }, { "identifier": "ConditionalLogBinomial", "path": "libs/omnidata_torch/lib/zoe/zoedepth/models/layers/dist_layers.py", "snippet": "class ConditionalLogBinomial(nn.Module):\n def __init__(self, in_features, condition_dim, n_classes=256, bottleneck_factor=2, p_eps=1e-4, max_temp=50, min_temp=1e-7, act=torch.softmax):\n \"\"\"Conditional Log Binomial distribution\n\n Args:\n in_features (int): number of input channels in main feature\n condition_dim (int): number of input channels in condition feature\n n_classes (int, optional): Number of classes. Defaults to 256.\n bottleneck_factor (int, optional): Hidden dim factor. Defaults to 2.\n p_eps (float, optional): small eps value. Defaults to 1e-4.\n max_temp (float, optional): Maximum temperature of output distribution. Defaults to 50.\n min_temp (float, optional): Minimum temperature of output distribution. Defaults to 1e-7.\n \"\"\"\n super().__init__()\n self.p_eps = p_eps\n self.max_temp = max_temp\n self.min_temp = min_temp\n self.log_binomial_transform = LogBinomial(n_classes, act=act)\n bottleneck = (in_features + condition_dim) // bottleneck_factor\n self.mlp = nn.Sequential(\n nn.Conv2d(in_features + condition_dim, bottleneck,\n kernel_size=1, stride=1, padding=0),\n nn.GELU(),\n # 2 for p linear norm, 2 for t linear norm\n nn.Conv2d(bottleneck, 2+2, kernel_size=1, stride=1, padding=0),\n nn.Softplus()\n )\n\n def forward(self, x, cond):\n \"\"\"Forward pass\n\n Args:\n x (torch.Tensor - NCHW): Main feature\n cond (torch.Tensor - NCHW): condition feature\n\n Returns:\n torch.Tensor: Output log binomial distribution\n \"\"\"\n pt = self.mlp(torch.concat((x, cond), dim=1))\n p, t = pt[:, :2, ...], pt[:, 2:, ...]\n\n p = p + self.p_eps\n p = p[:, 0, ...] / (p[:, 0, ...] + p[:, 1, ...])\n\n t = t + self.p_eps\n t = t[:, 0, ...] / (t[:, 0, ...] + t[:, 1, ...])\n t = t.unsqueeze(1)\n t = (self.max_temp - self.min_temp) * t + self.min_temp\n\n return self.log_binomial_transform(p, t)" }, { "identifier": "Projector", "path": "libs/omnidata_torch/lib/zoe/zoedepth/models/layers/localbins_layers.py", "snippet": "class Projector(nn.Module):\n def __init__(self, in_features, out_features, mlp_dim=128):\n \"\"\"Projector MLP\n\n Args:\n in_features (int): input channels\n out_features (int): output channels\n mlp_dim (int, optional): hidden dimension. 
Defaults to 128.\n \"\"\"\n super().__init__()\n\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, out_features, 1, 1, 0),\n )\n\n def forward(self, x):\n return self._net(x)" }, { "identifier": "SeedBinRegressor", "path": "libs/omnidata_torch/lib/zoe/zoedepth/models/layers/localbins_layers.py", "snippet": "class SeedBinRegressor(nn.Module):\n def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10):\n \"\"\"Bin center regressor network. Bin centers are bounded on (min_depth, max_depth) interval.\n\n Args:\n in_features (int): input channels\n n_bins (int, optional): Number of bin centers. Defaults to 16.\n mlp_dim (int, optional): Hidden dimension. Defaults to 256.\n min_depth (float, optional): Min depth value. Defaults to 1e-3.\n max_depth (float, optional): Max depth value. Defaults to 10.\n \"\"\"\n super().__init__()\n self.version = \"1_1\"\n self.min_depth = min_depth\n self.max_depth = max_depth\n\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, n_bins, 1, 1, 0),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x):\n \"\"\"\n Returns tensor of bin_width vectors (centers). One vector b for every pixel\n \"\"\"\n B = self._net(x)\n eps = 1e-3\n B = B + eps\n B_widths_normed = B / B.sum(dim=1, keepdim=True)\n B_widths = (self.max_depth - self.min_depth) * \\\n B_widths_normed # .shape NCHW\n # pad has the form (left, right, top, bottom, front, back)\n B_widths = nn.functional.pad(\n B_widths, (0, 0, 0, 0, 1, 0), mode='constant', value=self.min_depth)\n B_edges = torch.cumsum(B_widths, dim=1) # .shape NCHW\n\n B_centers = 0.5 * (B_edges[:, :-1, ...] + B_edges[:, 1:, ...])\n return B_widths_normed, B_centers" }, { "identifier": "SeedBinRegressorUnnormed", "path": "libs/omnidata_torch/lib/zoe/zoedepth/models/layers/localbins_layers.py", "snippet": "class SeedBinRegressorUnnormed(nn.Module):\n def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10):\n \"\"\"Bin center regressor network. Bin centers are unbounded\n\n Args:\n in_features (int): input channels\n n_bins (int, optional): Number of bin centers. Defaults to 16.\n mlp_dim (int, optional): Hidden dimension. Defaults to 256.\n min_depth (float, optional): Not used. (for compatibility with SeedBinRegressor)\n max_depth (float, optional): Not used. (for compatibility with SeedBinRegressor)\n \"\"\"\n super().__init__()\n self.version = \"1_1\"\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, n_bins, 1, 1, 0),\n nn.Softplus()\n )\n\n def forward(self, x):\n \"\"\"\n Returns tensor of bin_width vectors (centers). One vector b for every pixel\n \"\"\"\n B_centers = self._net(x)\n return B_centers, B_centers" }, { "identifier": "load_state_from_resource", "path": "libs/omnidata_torch/lib/zoe/zoedepth/models/model_io.py", "snippet": "def load_state_from_resource(model, resource: str):\n \"\"\"Loads weights to the model from a given resource. A resource can be of following types:\n 1. URL. Prefixed with \"url::\"\n e.g. url::http(s)://url.resource.com/ckpt.pt\n\n 2. Local path. Prefixed with \"local::\"\n e.g. 
local::/path/to/ckpt.pt\n\n\n Args:\n model (torch.nn.Module): Model\n resource (str): resource string\n\n Returns:\n torch.nn.Module: Model with loaded weights\n \"\"\"\n print(f\"Using pretrained resource {resource}\")\n\n if resource.startswith('url::'):\n url = resource.split('url::')[1]\n return load_state_dict_from_url(model, url, progress=True)\n\n elif resource.startswith('local::'):\n path = resource.split('local::')[1]\n return load_wts(model, path)\n \n else:\n raise ValueError(\"Invalid resource type, only url:: and local:: are supported\")" } ]
import itertools

import torch
import torch.nn as nn

from ..depth_model import DepthModel
from ..base_models.midas import MidasCore
from ..layers.attractor import AttractorLayer, AttractorLayerUnnormed
from ..layers.dist_layers import ConditionalLogBinomial
from ..layers.localbins_layers import (Projector, SeedBinRegressor, SeedBinRegressorUnnormed)
from ..model_io import load_state_from_resource
8,666
# MIT License # Copyright (c) 2022 Intelligent Systems Lab Org # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # File author: Shariq Farooq Bhat class ZoeDepth(DepthModel): def __init__(self, core, n_bins=64, bin_centers_type="softplus", bin_embedding_dim=128, min_depth=1e-3, max_depth=10, n_attractors=[16, 8, 4, 1], attractor_alpha=300, attractor_gamma=2, attractor_kind='sum', attractor_type='exp', min_temp=5, max_temp=50, train_midas=True, midas_lr_factor=10, encoder_lr_factor=10, pos_enc_lr_factor=10, inverse_midas=False, **kwargs): """ZoeDepth model. This is the version of ZoeDepth that has a single metric head Args: core (models.base_models.midas.MidasCore): The base midas model that is used for extraction of "relative" features n_bins (int, optional): Number of bin centers. Defaults to 64. bin_centers_type (str, optional): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers. For "softplus", softplus activation is used and thus are unbounded. Defaults to "softplus". bin_embedding_dim (int, optional): bin embedding dimension. Defaults to 128. min_depth (float, optional): Lower bound for normed bin centers. Defaults to 1e-3. max_depth (float, optional): Upper bound for normed bin centers. Defaults to 10. n_attractors (List[int], optional): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1]. attractor_alpha (int, optional): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 300. attractor_gamma (int, optional): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2. attractor_kind (str, optional): Attraction aggregation "sum" or "mean". Defaults to 'sum'. attractor_type (str, optional): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'exp'. min_temp (int, optional): Lower bound for temperature of output probability distribution. Defaults to 5. max_temp (int, optional): Upper bound for temperature of output probability distribution. Defaults to 50. train_midas (bool, optional): Whether to train "core", the base midas model. Defaults to True. midas_lr_factor (int, optional): Learning rate reduction factor for base midas model except its encoder and positional encodings. Defaults to 10. encoder_lr_factor (int, optional): Learning rate reduction factor for the encoder in midas model. Defaults to 10. 
pos_enc_lr_factor (int, optional): Learning rate reduction factor for positional encodings in the base midas model. Defaults to 10. """ super().__init__() self.core = core self.max_depth = max_depth self.min_depth = min_depth self.min_temp = min_temp self.bin_centers_type = bin_centers_type self.midas_lr_factor = midas_lr_factor self.encoder_lr_factor = encoder_lr_factor self.pos_enc_lr_factor = pos_enc_lr_factor self.train_midas = train_midas self.inverse_midas = inverse_midas if self.encoder_lr_factor <= 0: self.core.freeze_encoder( freeze_rel_pos=self.pos_enc_lr_factor <= 0) N_MIDAS_OUT = 32 btlnck_features = self.core.output_channels[0] num_out_features = self.core.output_channels[1:] self.conv2 = nn.Conv2d(btlnck_features, btlnck_features, kernel_size=1, stride=1, padding=0) # btlnck conv if bin_centers_type == "normed": SeedBinRegressorLayer = SeedBinRegressor Attractor = AttractorLayer elif bin_centers_type == "softplus": SeedBinRegressorLayer = SeedBinRegressorUnnormed Attractor = AttractorLayerUnnormed elif bin_centers_type == "hybrid1": SeedBinRegressorLayer = SeedBinRegressor Attractor = AttractorLayerUnnormed elif bin_centers_type == "hybrid2": SeedBinRegressorLayer = SeedBinRegressorUnnormed Attractor = AttractorLayer else: raise ValueError( "bin_centers_type should be one of 'normed', 'softplus', 'hybrid1', 'hybrid2'") self.seed_bin_regressor = SeedBinRegressorLayer( btlnck_features, n_bins=n_bins, min_depth=min_depth, max_depth=max_depth) self.seed_projector = Projector(btlnck_features, bin_embedding_dim) self.projectors = nn.ModuleList([ Projector(num_out, bin_embedding_dim) for num_out in num_out_features ]) self.attractors = nn.ModuleList([ Attractor(bin_embedding_dim, n_bins, n_attractors=n_attractors[i], min_depth=min_depth, max_depth=max_depth, alpha=attractor_alpha, gamma=attractor_gamma, kind=attractor_kind, attractor_type=attractor_type) for i in range(len(num_out_features)) ]) last_in = N_MIDAS_OUT + 1 # +1 for relative depth # use log binomial instead of softmax
# MIT License # Copyright (c) 2022 Intelligent Systems Lab Org # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # File author: Shariq Farooq Bhat class ZoeDepth(DepthModel): def __init__(self, core, n_bins=64, bin_centers_type="softplus", bin_embedding_dim=128, min_depth=1e-3, max_depth=10, n_attractors=[16, 8, 4, 1], attractor_alpha=300, attractor_gamma=2, attractor_kind='sum', attractor_type='exp', min_temp=5, max_temp=50, train_midas=True, midas_lr_factor=10, encoder_lr_factor=10, pos_enc_lr_factor=10, inverse_midas=False, **kwargs): """ZoeDepth model. This is the version of ZoeDepth that has a single metric head Args: core (models.base_models.midas.MidasCore): The base midas model that is used for extraction of "relative" features n_bins (int, optional): Number of bin centers. Defaults to 64. bin_centers_type (str, optional): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers. For "softplus", softplus activation is used and thus are unbounded. Defaults to "softplus". bin_embedding_dim (int, optional): bin embedding dimension. Defaults to 128. min_depth (float, optional): Lower bound for normed bin centers. Defaults to 1e-3. max_depth (float, optional): Upper bound for normed bin centers. Defaults to 10. n_attractors (List[int], optional): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1]. attractor_alpha (int, optional): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 300. attractor_gamma (int, optional): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2. attractor_kind (str, optional): Attraction aggregation "sum" or "mean". Defaults to 'sum'. attractor_type (str, optional): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'exp'. min_temp (int, optional): Lower bound for temperature of output probability distribution. Defaults to 5. max_temp (int, optional): Upper bound for temperature of output probability distribution. Defaults to 50. train_midas (bool, optional): Whether to train "core", the base midas model. Defaults to True. midas_lr_factor (int, optional): Learning rate reduction factor for base midas model except its encoder and positional encodings. Defaults to 10. encoder_lr_factor (int, optional): Learning rate reduction factor for the encoder in midas model. Defaults to 10. 
pos_enc_lr_factor (int, optional): Learning rate reduction factor for positional encodings in the base midas model. Defaults to 10. """ super().__init__() self.core = core self.max_depth = max_depth self.min_depth = min_depth self.min_temp = min_temp self.bin_centers_type = bin_centers_type self.midas_lr_factor = midas_lr_factor self.encoder_lr_factor = encoder_lr_factor self.pos_enc_lr_factor = pos_enc_lr_factor self.train_midas = train_midas self.inverse_midas = inverse_midas if self.encoder_lr_factor <= 0: self.core.freeze_encoder( freeze_rel_pos=self.pos_enc_lr_factor <= 0) N_MIDAS_OUT = 32 btlnck_features = self.core.output_channels[0] num_out_features = self.core.output_channels[1:] self.conv2 = nn.Conv2d(btlnck_features, btlnck_features, kernel_size=1, stride=1, padding=0) # btlnck conv if bin_centers_type == "normed": SeedBinRegressorLayer = SeedBinRegressor Attractor = AttractorLayer elif bin_centers_type == "softplus": SeedBinRegressorLayer = SeedBinRegressorUnnormed Attractor = AttractorLayerUnnormed elif bin_centers_type == "hybrid1": SeedBinRegressorLayer = SeedBinRegressor Attractor = AttractorLayerUnnormed elif bin_centers_type == "hybrid2": SeedBinRegressorLayer = SeedBinRegressorUnnormed Attractor = AttractorLayer else: raise ValueError( "bin_centers_type should be one of 'normed', 'softplus', 'hybrid1', 'hybrid2'") self.seed_bin_regressor = SeedBinRegressorLayer( btlnck_features, n_bins=n_bins, min_depth=min_depth, max_depth=max_depth) self.seed_projector = Projector(btlnck_features, bin_embedding_dim) self.projectors = nn.ModuleList([ Projector(num_out, bin_embedding_dim) for num_out in num_out_features ]) self.attractors = nn.ModuleList([ Attractor(bin_embedding_dim, n_bins, n_attractors=n_attractors[i], min_depth=min_depth, max_depth=max_depth, alpha=attractor_alpha, gamma=attractor_gamma, kind=attractor_kind, attractor_type=attractor_type) for i in range(len(num_out_features)) ]) last_in = N_MIDAS_OUT + 1 # +1 for relative depth # use log binomial instead of softmax
self.conditional_log_binomial = ConditionalLogBinomial(
4
2023-12-06 07:29:34+00:00
12k
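The ZoeDepth record above revolves around the bin-center machinery (SeedBinRegressor, Projector, AttractorLayer). As a quick illustration of what the seed regressor's normalization does, here is a minimal, self-contained sketch that mirrors the arithmetic in the SeedBinRegressor snippet; the tensor sizes (batch 1, 4 bins, 2x2 spatial) are assumptions chosen only for readability, not values from the record.

```python
import torch
import torch.nn as nn

# Toy sketch (assumed shapes, not the real ZoeDepth forward pass): how positive
# per-pixel "width" activations become bin centers bounded on (min_depth, max_depth).
min_depth, max_depth, n_bins = 1e-3, 10.0, 4
B = torch.rand(1, n_bins, 2, 2) + 1e-3                  # positive widths, NCHW
B_widths_normed = B / B.sum(dim=1, keepdim=True)        # widths sum to 1 per pixel
B_widths = (max_depth - min_depth) * B_widths_normed    # scale to the depth range
# pad has the form (left, right, top, bottom, front, back): prepend a min_depth bin
B_widths = nn.functional.pad(B_widths, (0, 0, 0, 0, 1, 0),
                             mode='constant', value=min_depth)
B_edges = torch.cumsum(B_widths, dim=1)                 # cumulative widths -> bin edges
B_centers = 0.5 * (B_edges[:, :-1, ...] + B_edges[:, 1:, ...])  # midpoints = centers
print(B_centers.shape)  # torch.Size([1, 4, 2, 2]); every value lies between min_depth and max_depth
```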
FrozenBurning/PrimDiffusion
visualize.py
[ { "identifier": "RayMarcher", "path": "dva/ray_marcher.py", "snippet": "class RayMarcher(nn.Module):\n def __init__(\n self,\n image_height,\n image_width,\n volradius,\n fadescale=8.0,\n fadeexp=8.0,\n dt=1.0,\n ray_subsample_factor=1,\n accum=2,\n termthresh=0.99,\n blocksize=None,\n with_t_img=True,\n chlast=False,\n assets=None,\n ):\n super().__init__()\n\n # TODO: add config?\n self.image_height = image_height\n self.image_width = image_width\n self.volradius = volradius\n self.dt = dt\n\n self.fadescale = fadescale\n self.fadeexp = fadeexp\n\n # NOTE: this seems to not work for other configs?\n if blocksize is None:\n blocksize = (8, 16)\n\n self.blocksize = blocksize\n self.with_t_img = with_t_img\n self.chlast = chlast\n\n self.accum = accum\n self.termthresh = termthresh\n\n base_pixel_coords = th.stack(\n th.meshgrid(\n th.arange(self.image_height, dtype=th.float32),\n th.arange(self.image_width, dtype=th.float32),\n )[::-1],\n dim=-1,\n )\n self.register_buffer(\"base_pixel_coords\", base_pixel_coords, persistent=False)\n self.fixed_bvh_cache = {-1: (th.empty(0), th.empty(0), th.empty(0))}\n self.ray_subsample_factor = ray_subsample_factor\n\n def _set_pix_coords(self):\n dev = self.base_pixel_coords.device\n self.base_pixel_coords = th.stack(\n th.meshgrid(\n th.arange(self.image_height, dtype=th.float32, device=dev),\n th.arange(self.image_width, dtype=th.float32, device=dev),\n )[::-1],\n dim=-1,\n )\n\n def resize(self, h: int, w: int):\n self.image_height = h\n self.image_width = w\n\n self._set_pix_coords()\n\n def forward(\n self,\n prim_rgba: th.Tensor,\n prim_pos: th.Tensor,\n prim_rot: th.Tensor,\n prim_scale: th.Tensor,\n K: th.Tensor,\n RT: th.Tensor,\n ray_subsample_factor: Optional[int] = None,\n ):\n \"\"\"\n Args:\n prim_rgba: primitive payload [B, K, 4, S, S, S],\n K - # of primitives, S - primitive size\n prim_pos: locations [B, K, 3]\n prim_rot: rotations [B, K, 3, 3]\n prim_scale: scales [B, K, 3]\n K: intrinsics [B, 3, 3]\n RT: extrinsics [B, 3, 4]\n Returns:\n a dict of tensors\n \"\"\"\n # TODO: maybe we can re-use mvpraymarcher?\n B = prim_rgba.shape[0]\n device = prim_rgba.device\n\n # TODO: this should return focal 2x2?\n camera = convert_camera_parameters(RT, K)\n camera = {k: v.contiguous() for k, v in camera.items()}\n\n dt = self.dt / self.volradius\n\n if ray_subsample_factor is None:\n ray_subsample_factor = self.ray_subsample_factor\n\n if ray_subsample_factor > 1 and self.training:\n pixel_coords = subsample_pixel_coords(\n self.base_pixel_coords, int(B), ray_subsample_factor\n )\n elif ray_subsample_factor > 1:\n pixel_coords = resize_pixel_coords(\n self.base_pixel_coords,\n int(B),\n ray_subsample_factor,\n )\n else:\n pixel_coords = (\n self.base_pixel_coords[np.newaxis].expand(B, -1, -1, -1).contiguous()\n )\n\n prim_pos = prim_pos / self.volradius\n\n focal = th.diagonal(camera[\"focal\"], dim1=1, dim2=2).contiguous()\n\n # TODO: port this?\n raypos, raydir, tminmax = compute_raydirs(\n viewpos=camera[\"campos\"],\n viewrot=camera[\"camrot\"],\n focal=focal,\n princpt=camera[\"princpt\"],\n pixelcoords=pixel_coords,\n volradius=self.volradius,\n )\n\n rgba = mvpraymarch(\n raypos,\n raydir,\n stepsize=dt,\n tminmax=tminmax,\n algo=0,\n template=prim_rgba.permute(0, 1, 3, 4, 5, 2).contiguous(),\n warp=None,\n termthresh=self.termthresh,\n primtransf=(prim_pos, prim_rot, prim_scale),\n fadescale=self.fadescale,\n fadeexp=self.fadeexp,\n usebvh=\"fixedorder\",\n chlast=True,\n )\n\n rgba = rgba.permute(0, 3, 1, 2)\n\n preds = {\n 
\"rgba_image\": rgba,\n \"pixel_coords\": pixel_coords,\n }\n\n return preds" }, { "identifier": "generate_colored_boxes", "path": "dva/ray_marcher.py", "snippet": "def generate_colored_boxes(template, prim_rot, alpha=10000.0, seed=123456):\n B = template.shape[0]\n output = template.clone()\n device = template.device\n\n lightdir = -3 * th.ones([B, 3], dtype=th.float32, device=device)\n lightdir = lightdir / th.norm(lightdir, p=2, dim=1, keepdim=True)\n\n zz, yy, xx = th.meshgrid(\n th.linspace(-1.0, 1.0, template.size(-1), device=device),\n th.linspace(-1.0, 1.0, template.size(-1), device=device),\n th.linspace(-1.0, 1.0, template.size(-1), device=device),\n )\n primnormalx = th.where(\n (th.abs(xx) >= th.abs(yy)) & (th.abs(xx) >= th.abs(zz)),\n th.sign(xx) * th.ones_like(xx),\n th.zeros_like(xx),\n )\n primnormaly = th.where(\n (th.abs(yy) >= th.abs(xx)) & (th.abs(yy) >= th.abs(zz)),\n th.sign(yy) * th.ones_like(xx),\n th.zeros_like(xx),\n )\n primnormalz = th.where(\n (th.abs(zz) >= th.abs(xx)) & (th.abs(zz) >= th.abs(yy)),\n th.sign(zz) * th.ones_like(xx),\n th.zeros_like(xx),\n )\n primnormal = th.stack([primnormalx, -primnormaly, -primnormalz], dim=-1)\n primnormal = primnormal / th.sqrt(th.sum(primnormal**2, dim=-1, keepdim=True))\n\n output[:, :, 3, :, :, :] = alpha\n\n np.random.seed(seed)\n\n for i in range(template.size(1)):\n # generating a random color\n output[:, i, 0, :, :, :] = np.random.rand() * 255.0\n output[:, i, 1, :, :, :] = np.random.rand() * 255.0\n output[:, i, 2, :, :, :] = np.random.rand() * 255.0\n\n # get light direction in local coordinate system?\n lightdir0 = lightdir\n mult = th.sum(\n lightdir0[:, None, None, None, :] * primnormal[np.newaxis], dim=-1\n )[:, np.newaxis, :, :, :].clamp(min=0.2)\n output[:, i, :3, :, :, :] *= 1.4 * mult\n return output" }, { "identifier": "RenderPeopleSViewDataset", "path": "primdiffusion/dataset/renderpeople_crossid_dataset.py", "snippet": "class RenderPeopleSViewDataset(Dataset):\n def __init__(\n self,\n root_dir,\n subject_ids,\n smpl_poses,\n image,\n image_mask,\n image_part_mask,\n cam_path,\n frame_list=None,\n cameras=None,\n cond_cameras=None,\n sample_cameras=True,\n camera_id=None,\n image_height=1024,\n image_width=1024,\n is_train=True,\n **kwargs,\n ):\n super().__init__()\n # subject ids is a text file contains list of subject ids\n self.image_height = image_height\n self.image_width = image_width\n self.ref_frame = 0\n\n with open(subject_ids, 'r') as f:\n human_list = f.read().splitlines()\n self.subject_ids = human_list\n self.root_dir = root_dir\n\n if frame_list is None:\n n_frames = len(os.listdir(os.path.join(self.root_dir, self.subject_ids[0], 'img', 'camera0000')))\n self.frame_list = [str(fid) for fid in range(n_frames)]\n\n self.image_path = image\n self.image_mask_path = image_mask\n self.image_part_mask_path = image_part_mask\n\n self.is_train = is_train\n all_cameras = self.load_all_cameras(cam_path)\n\n # TODO: inference logics\n if not self.is_train:\n assert not sample_cameras\n assert camera_id is not None\n\n self.cameras = all_cameras\n\n self.cond_cameras = cond_cameras\n\n self.sample_cameras = sample_cameras\n self.camera_id = camera_id\n\n self.all_smpl = self.load_all_smpl(smpl_poses)\n\n def load_all_smpl(self, smpl_poses):\n all_smpl = {}\n for people_id in self.subject_ids:\n current_smpl_path = smpl_poses.format(people_id=people_id)\n smpl_param = dict(np.load(current_smpl_path, allow_pickle=True))['smpl'].item()\n poses = np.zeros((smpl_param['body_pose'].shape[0], 
72)).astype(np.float32)\n poses[:, :3] = np.array(smpl_param['global_orient']).astype(np.float32)\n poses[:, 3:] = np.array(smpl_param['body_pose']).astype(np.float32)\n\n shapes = np.array(smpl_param['betas']).astype(np.float32)\n shapes = np.repeat(shapes[:], poses.shape[0], axis=0)\n Rh = smpl_param['global_orient'].astype(np.float32)\n Th = smpl_param['transl'].astype(np.float32)\n current_smpl = {\n 'shapes': shapes,\n 'Rh': Rh * 0, #FIXME: hack\n 'Th': Th,\n 'poses': poses,\n }\n all_smpl[people_id] = current_smpl\n\n return all_smpl\n\n def load_all_cameras(self, camera_path):\n # input path to camera.json under synbody sequence\n # all_cameras is dict of dict\n all_cameras = {}\n for people_id in self.subject_ids:\n current_camera_path = camera_path.format(people_id=people_id)\n current_camera = {}\n with open(current_camera_path) as f:\n camera = json.load(f)\n for view_index in range(len(camera.keys())):\n K, R, T, _ = get_KRTD(camera, view_index)\n current_camera['camera{:04d}'.format(view_index)] = {\n \"Rt\": np.concatenate([R, T[..., None]], axis=1).astype(np.float32),\n \"K\": K.astype(np.float32),\n }\n for c in current_camera.values():\n c[\"cam_pos\"] = -np.dot(c[\"Rt\"][:3, :3].T, c[\"Rt\"][:3, 3])\n c[\"Rt\"][:, -1] *= 1000.0\n all_cameras[people_id] = current_camera\n return all_cameras\n\n def __len__(self):\n return len(self.subject_ids) * 200\n\n def __getitem__(self, idx):\n # idx is subject_id wise index\n people_id = self.subject_ids[idx % len(self.subject_ids)]\n\n # random sample frames\n frame = (\n random.choice(self.frame_list)\n )\n\n # random sample cameras\n camera_id = (\n random.choice(list(self.cameras[people_id].keys()))\n if self.sample_cameras\n else self.camera_id\n )\n fmts = dict(people_id=people_id, frame=int(frame), camera=camera_id)\n\n sample = {\"index\": idx, **fmts}\n\n sample.update(load_smpl_params(self.all_smpl[people_id], int(frame)))\n\n ref_frame_smpl = {'ref_' + k: v for k, v in load_smpl_params(self.all_smpl[people_id], int(self.ref_frame)).items()}\n sample.update(ref_frame_smpl)\n\n sample[\"image\"] = np.transpose(\n cv2.imread(self.image_path.format(**fmts))[..., ::-1].astype(np.float32),\n axes=(2, 0, 1),\n )\n\n # reading all the cond images\n if self.cond_cameras:\n sample[\"cond_image\"] = []\n sample[\"cond_Rt\"] = []\n sample[\"cond_K\"] = []\n # for cond_camera_id in self.cond_cameras:\n # FIXME: hack for random condition views\n cond_camera_id = random.choice(list(self.cameras[people_id].keys()))\n if True:\n cond_image = np.transpose(\n cv2.imread(\n self.image_path.format(\n people_id=people_id, frame=int(self.ref_frame), camera=cond_camera_id\n )\n )[..., ::-1].astype(np.float32),\n axes=(2, 0, 1),\n )\n sample[\"cond_image\"].append(cond_image)\n sample[\"cond_Rt\"].append(self.cameras[people_id][cond_camera_id][\"Rt\"])\n sample[\"cond_K\"].append(self.cameras[people_id][cond_camera_id][\"K\"])\n\n for key in [\"image\", \"K\", \"Rt\"]:\n sample[f\"cond_{key}\"] = np.stack(sample[f\"cond_{key}\"], axis=0)\n\n sample[\"cond_cameras\"] = self.cond_cameras[:]\n\n sample[\"image\"] = np.transpose(\n cv2.imread(self.image_path.format(**fmts))[..., ::-1].astype(np.float32),\n axes=(2, 0, 1),\n )\n\n image_mask = cv2.imread(self.image_mask_path.format(**fmts))\n border = 3\n kernel = np.ones((border, border), np.uint8)\n msk_erode = cv2.erode(image_mask.copy(), kernel)[np.newaxis, ..., 0]\n sample[\"image_mask\"] = (msk_erode != 0).astype(np.float32)\n\n image_part_mask = 
cv2.imread(self.image_part_mask_path.format(**fmts))\n part_msk_erode = cv2.erode(image_part_mask.copy(), kernel)[np.newaxis, ..., 0]\n sample[\"image_part_mask\"] = part_msk_erode\n\n sample[\"image_bg\"] = sample[\"image\"] * ~(sample[\"image_part_mask\"] != 0)\n\n sample.update(self.cameras[people_id][camera_id])\n\n return sample\n \n def gen_inf_cameras(self, num_views = 5):\n training_views = self.cameras[self.subject_ids[0]]\n self.training_views = training_views\n num_training_views = len(training_views.keys())\n interpolation_anchors = []\n for view_index in range(num_training_views):\n Rt = training_views['camera{:04d}'.format(view_index)]['Rt']\n K = training_views['camera{:04d}'.format(view_index)]['K']\n rot = Rt[:, :3]\n trans = Rt[:, 3]\n interpolation_anchors.append((rot, trans))\n interpolated_poses = interpolate_poses(interpolation_anchors, num_views)\n\n inf_camera = {}\n for people_id in self.subject_ids:\n current_camera = {}\n for view_index in range(len(interpolated_poses)):\n R, T = interpolated_poses[view_index]\n current_camera['camera{:04d}'.format(view_index)] = {\n \"Rt\": np.concatenate([R, T[..., None]], axis=1).astype(np.float32),\n \"K\": K.astype(np.float32),\n }\n for c in current_camera.values():\n c[\"cam_pos\"] = -np.dot(c[\"Rt\"][:3, :3].T, c[\"Rt\"][:3, 3])\n # c[\"Rt\"][:, -1] *= 1000.0\n inf_camera[people_id] = current_camera\n self.inf_cameras = inf_camera\n\n\n def inf_sample(self, people_id, camera_id, frame_id, cond_sample):\n fmts = dict(people_id=people_id, frame=int(frame_id), camera=camera_id)\n sample = {}\n sample.update({**fmts})\n\n sample.update(load_smpl_params(self.all_smpl[people_id], int(frame_id)))\n\n sample.update(self.inf_cameras[people_id][camera_id])\n\n for k, v in sample.items():\n if isinstance(v, np.ndarray):\n sample[k] = v[None, ...]\n\n sample.update(cond_sample)\n return sample\n\n def cond_sample(self, people_id):\n sample = {}\n # reading all the cond images\n if self.cond_cameras:\n sample[\"cond_image\"] = []\n sample[\"cond_Rt\"] = []\n sample[\"cond_K\"] = []\n cond_camera_id = random.choice(list(self.cameras[people_id].keys()))\n if True:\n cond_image = np.transpose(\n cv2.imread(\n self.image_path.format(\n people_id=people_id, frame=int(self.ref_frame), camera=cond_camera_id\n )\n )[..., ::-1].astype(np.float32),\n axes=(2, 0, 1),\n )\n sample[\"cond_image\"].append(cond_image)\n sample[\"cond_Rt\"].append(self.cameras[people_id][cond_camera_id][\"Rt\"])\n sample[\"cond_K\"].append(self.cameras[people_id][cond_camera_id][\"K\"])\n\n for key in [\"image\", \"K\", \"Rt\"]:\n sample[f\"cond_{key}\"] = np.stack(sample[f\"cond_{key}\"], axis=0)\n\n sample[\"cond_cameras\"] = self.cond_cameras[:]\n for k, v in sample.items():\n if isinstance(v, np.ndarray):\n sample[k] = v[None, ...]\n return sample\n \n\n def inf_sample_wsmpl(self, people_id, camera_id, frame_id, cond_sample, smpl_param):\n fmts = dict(people_id=people_id, frame=int(frame_id), camera=camera_id)\n sample = {}\n sample.update({**fmts})\n\n sample.update(load_smpl_params(smpl_param, int(frame_id)))\n\n sample.update(self.inf_cameras[people_id][camera_id])\n\n for k, v in sample.items():\n if isinstance(v, np.ndarray):\n sample[k] = v[None, ...]\n\n sample.update(cond_sample)\n return sample\n\n def sample_cam_smpl(self):\n people_id = random.choice(self.subject_ids)\n frame_id = random.choice(self.frame_list)\n camera_id = random.choice(list(self.cameras[people_id].keys()))\n fmts = dict(people_id=people_id, frame=int(frame_id), camera=camera_id)\n 
sample = {}\n sample.update({**fmts})\n sample.update(load_smpl_params(self.all_smpl[people_id], int(frame_id)))\n sample.update(self.cameras[people_id][camera_id])\n for k, v in sample.items():\n if isinstance(v, np.ndarray):\n sample[k] = v[None, ...]\n return sample" }, { "identifier": "load_static_assets_crossid_smpl", "path": "dva/io.py", "snippet": "def load_static_assets_crossid_smpl(config):\n # with chumpy dependency!!!\n data_struct = read_pickle(config.data.smpl_topology)\n vt = np.load(os.path.join(os.path.dirname(config.data.smpl_topology), 'basicModel_vt.npy'))\n ft = np.load(os.path.join(os.path.dirname(config.data.smpl_topology), 'basicModel_ft.npy'))\n\n n_verts = data_struct[\"v_template\"].shape[0]\n\n topology = AttrDict(\n dict(\n vi=data_struct[\"f\"].astype(np.int64),\n vt=vt.astype(np.float32),\n vti=ft.astype(np.int64),\n n_verts=n_verts,\n )\n )\n\n topology.v2uv = compute_v2uv(topology.n_verts, topology.vi, topology.vti)\n\n nbs_idxs, nbs_weights = compute_neighbours(topology.n_verts, topology[\"vi\"])\n topology.nbs_idxs = nbs_idxs\n topology.nbs_weights = nbs_weights\n\n static_assets = AttrDict(\n dict(\n topology=topology,\n lbs_template_verts=data_struct[\"v_template\"],\n smpl_path=config.smpl_dir,\n )\n )\n\n if \"ref_frame\" in config: \n current_smpl_path = config.data.smpl_poses.format(people_id='seq_000016-rp_alison_rigged_002')\n smpl_param = dict(np.load(current_smpl_path, allow_pickle=True))['smpl'].item()\n poses = np.zeros((smpl_param['body_pose'].shape[0], 72)).astype(np.float32)\n poses[:, :3] = np.array(smpl_param['global_orient']).astype(np.float32)\n poses[:, 3:] = np.array(smpl_param['body_pose']).astype(np.float32)\n shapes = np.array(smpl_param['betas']).astype(np.float32)\n shapes = np.repeat(shapes[:], poses.shape[0], axis=0)\n Rh = smpl_param['global_orient'].astype(np.float32)\n Th = smpl_param['transl'].astype(np.float32)\n current_smpl = {\n 'shapes': shapes,\n 'Rh': Rh * 0, #FIXME: hack\n 'Th': Th,\n 'poses': poses,\n }\n\n static_assets[\"ref_frame\"] = {k: v[config.ref_frame][None, ...] 
for k, v in current_smpl.items()}\n\n return static_assets" }, { "identifier": "load_from_config", "path": "dva/io.py", "snippet": "def load_from_config(config, **kwargs):\n \"\"\"Instantiate an object given a config and arguments.\"\"\"\n assert \"class_name\" in config and \"module_name\" not in config\n config = copy.deepcopy(config)\n class_name = config.pop(\"class_name\")\n object_class = load_class(class_name)\n return object_class(**config, **kwargs)" }, { "identifier": "to_device", "path": "dva/utils.py", "snippet": "def to_device(values, device=None, non_blocking=True):\n \"\"\"Transfer a set of values to the device.\n Args:\n values: a nested dict/list/tuple of tensors\n device: argument to `to()` for the underlying vector\n NOTE:\n if the device is not specified, using `th.cuda()`\n \"\"\"\n if device is None:\n device = th.device(\"cuda\")\n\n if isinstance(values, dict):\n return {k: to_device(v, device=device) for k, v in values.items()}\n elif isinstance(values, tuple):\n return tuple(to_device(v, device=device) for v in values)\n elif isinstance(values, list):\n return [to_device(v, device=device) for v in values]\n elif isinstance(values, th.Tensor):\n return values.to(device, non_blocking=non_blocking)\n elif isinstance(values, nn.Module):\n return values.to(device)\n elif isinstance(values, np.ndarray):\n return th.from_numpy(values).to(device)\n else:\n return values" }, { "identifier": "make_postex", "path": "dva/geom.py", "snippet": "def make_postex(v, idxim, barim):\n return (\n barim[None, :, :, 0, None] * v[:, idxim[:, :, 0]]\n + barim[None, :, :, 1, None] * v[:, idxim[:, :, 1]]\n + barim[None, :, :, 2, None] * v[:, idxim[:, :, 2]]\n ).permute(0, 3, 1, 2)" }, { "identifier": "compute_tbn", "path": "dva/geom.py", "snippet": "def compute_tbn(geom, vt, vi, vti):\n \"\"\"Computes tangent, bitangent, and normal vectors given a mesh.\n Args:\n geom: [N, n_verts, 3] th.Tensor\n Vertex positions.\n vt: [n_uv_coords, 2] th.Tensor\n UV coordinates.\n vi: [..., 3] th.Tensor\n Face vertex indices.\n vti: [..., 3] th.Tensor\n Face UV indices.\n Returns:\n [..., 3] th.Tensors for T, B, N.\n \"\"\"\n\n v0 = geom[:, vi[..., 0]]\n v1 = geom[:, vi[..., 1]]\n v2 = geom[:, vi[..., 2]]\n vt0 = vt[vti[..., 0]]\n vt1 = vt[vti[..., 1]]\n vt2 = vt[vti[..., 2]]\n\n v01 = v1 - v0\n v02 = v2 - v0\n vt01 = vt1 - vt0\n vt02 = vt2 - vt0\n f = 1.0 / (\n vt01[None, ..., 0] * vt02[None, ..., 1]\n - vt01[None, ..., 1] * vt02[None, ..., 0]\n )\n tangent = f[..., None] * th.stack(\n [\n v01[..., 0] * vt02[None, ..., 1] - v02[..., 0] * vt01[None, ..., 1],\n v01[..., 1] * vt02[None, ..., 1] - v02[..., 1] * vt01[None, ..., 1],\n v01[..., 2] * vt02[None, ..., 1] - v02[..., 2] * vt01[None, ..., 1],\n ],\n dim=-1,\n )\n tangent = F.normalize(tangent, dim=-1)\n normal = F.normalize(th.cross(v01, v02, dim=3), dim=-1)\n bitangent = F.normalize(th.cross(tangent, normal, dim=3), dim=-1)\n\n return tangent, bitangent, normal" } ]
import os
import sys
import imageio
import torch as th
import numpy as np
import random
import logging

from omegaconf import OmegaConf

from dva.ray_marcher import RayMarcher, generate_colored_boxes
from primdiffusion.dataset.renderpeople_crossid_dataset import RenderPeopleSViewDataset
from dva.io import load_static_assets_crossid_smpl, load_from_config
from dva.utils import to_device
from dva.geom import make_postex, compute_tbn
7,799
) return preds_boxes["rgba_image"][:, :3].permute(0, 2, 3, 1) def set_random_seed(seed): r"""Set random seeds for everything. Args: seed (int): Random seed. by_rank (bool): """ print(f"Using random seed {seed}") random.seed(seed) np.random.seed(seed) th.manual_seed(seed) th.cuda.manual_seed(seed) th.cuda.manual_seed_all(seed) def to_video_out(input): ndarr = input[0].mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", th.uint8).numpy() return ndarr def main(config): use_ddim = config.ddim device = th.device("cuda:0") th.cuda.set_device(device) static_assets = load_static_assets_crossid_smpl(config) inference_output_dir = f"{config.output_dir}/primdiffusion_interm_visualization" checkpoint_path = config.checkpoint_path os.makedirs(inference_output_dir, exist_ok=True) video_path = os.path.join(inference_output_dir, 'videos') os.makedirs(video_path, exist_ok=True) OmegaConf.save(config, os.path.join(inference_output_dir, "config.yml")) logger.info(f"saving results to {inference_output_dir}") logger.info(f"starting inference with the config: {OmegaConf.to_yaml(config)}") model = load_from_config( config.model, assets=static_assets, ) print('loading checkpoint {}'.format(checkpoint_path)) state_dict = th.load(checkpoint_path, map_location='cpu') model.load_state_dict(state_dict['model_state_dict']) model = model.to(device) model.device = device model.eval() # computing values for the given viewpoints rm = RayMarcher( config.image_height, config.image_width, **config.rm, ).to(device) dataset = RenderPeopleSViewDataset( **config.data, cameras=config.cameras_train, cond_cameras=config.cameras_cond, sample_cameras=False, is_train=False, camera_id='00', ) sample_num = 1 seed_list = [1007,] dataset.gen_inf_cameras(num_views=5) for iter in range(1000): logger.info('Rendering iteration-{:04d}......'.format(iter)) set_random_seed(iter) batch = dataset.sample_cam_smpl() batch = to_device(batch, device) if use_ddim: log_every_t = 1 samples, z_denoise_row = model.sample_log(cond=None, batch_size = sample_num, ddim=True, ddim_steps=100, eta=0.0, log_every_t=log_every_t) z_denoise_row = z_denoise_row['x_inter'] else: log_every_t = 10 samples, z_denoise_row = model.sample_log(cond=None, batch_size = sample_num, ddim=False, ddim_steps=None, eta=0.0, log_every_t=log_every_t) samples = (samples / model.scaling_factor + 1) / 2. * 255. denoise_row = (th.stack(z_denoise_row) / model.scaling_factor + 1) / 2. * 255 prim_size = config.model.bodydecoder_config.prim_size n_prims_x = n_prims_y = int(config.model.bodydecoder_config.n_prims ** 0.5) # plot denoising row denoise_row = denoise_row.reshape(-1, sample_num, prim_size, 7, n_prims_y, prim_size, n_prims_x, prim_size).permute(0, 1, 4, 6, 3, 2, 5, 7).reshape(-1, sample_num, n_prims_y * n_prims_x, 7, prim_size, prim_size, prim_size) denoise_sample_deltascale = th.mean(denoise_row[:, :, :, 4:], dim=(-1, -2, -3)) / 255. * 20. denoise_sample_rgba = denoise_row[:, :, :, :4, :, :, :] num_steps = denoise_row.shape[0] for i in range(sample_num): batch = dataset.sample_cam_smpl() sam_cam = {} sam_cam.update(dataset.inf_cameras[dataset.subject_ids[0]]['camera0000']) for k, v in sam_cam.items(): if isinstance(v, np.ndarray): sam_cam[k] = v[None, ...] 
batch.update(sam_cam) batch = to_device(batch, device) B = 1 geom = model.bodydecoder.lbs_fn( poses = batch["poses"], shapes = batch["shapes"], Rh = batch["Rh"], Th = batch["Th"], v_template = model.bodydecoder.lbs_fn.v_template[np.newaxis], ) * 1000.0 prim_pos_mesh = ( make_postex(geom, model.bodydecoder.prim_vidx_img, model.bodydecoder.prim_bary_img) .permute(0, 2, 3, 1) .reshape(-1, model.bodydecoder.n_prims, 3) .detach() ) prim_scale_mesh = ( model.bodydecoder.prim_scale[np.newaxis, :, np.newaxis].expand(B, -1, 3).detach().clone() )
device = th.device("cuda") logger = logging.getLogger("visualize.py") def render_mvp_boxes(rm, batch, preds): with th.no_grad(): boxes_rgba = generate_colored_boxes( preds["prim_rgba"], preds["prim_rot"], ) preds_boxes = rm( prim_rgba=boxes_rgba, prim_pos=preds["prim_pos"], prim_scale=preds["prim_scale"], prim_rot=preds["prim_rot"], RT=batch["Rt"], K=batch["K"], ) return preds_boxes["rgba_image"][:, :3].permute(0, 2, 3, 1) def set_random_seed(seed): r"""Set random seeds for everything. Args: seed (int): Random seed. by_rank (bool): """ print(f"Using random seed {seed}") random.seed(seed) np.random.seed(seed) th.manual_seed(seed) th.cuda.manual_seed(seed) th.cuda.manual_seed_all(seed) def to_video_out(input): ndarr = input[0].mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", th.uint8).numpy() return ndarr def main(config): use_ddim = config.ddim device = th.device("cuda:0") th.cuda.set_device(device) static_assets = load_static_assets_crossid_smpl(config) inference_output_dir = f"{config.output_dir}/primdiffusion_interm_visualization" checkpoint_path = config.checkpoint_path os.makedirs(inference_output_dir, exist_ok=True) video_path = os.path.join(inference_output_dir, 'videos') os.makedirs(video_path, exist_ok=True) OmegaConf.save(config, os.path.join(inference_output_dir, "config.yml")) logger.info(f"saving results to {inference_output_dir}") logger.info(f"starting inference with the config: {OmegaConf.to_yaml(config)}") model = load_from_config( config.model, assets=static_assets, ) print('loading checkpoint {}'.format(checkpoint_path)) state_dict = th.load(checkpoint_path, map_location='cpu') model.load_state_dict(state_dict['model_state_dict']) model = model.to(device) model.device = device model.eval() # computing values for the given viewpoints rm = RayMarcher( config.image_height, config.image_width, **config.rm, ).to(device) dataset = RenderPeopleSViewDataset( **config.data, cameras=config.cameras_train, cond_cameras=config.cameras_cond, sample_cameras=False, is_train=False, camera_id='00', ) sample_num = 1 seed_list = [1007,] dataset.gen_inf_cameras(num_views=5) for iter in range(1000): logger.info('Rendering iteration-{:04d}......'.format(iter)) set_random_seed(iter) batch = dataset.sample_cam_smpl() batch = to_device(batch, device) if use_ddim: log_every_t = 1 samples, z_denoise_row = model.sample_log(cond=None, batch_size = sample_num, ddim=True, ddim_steps=100, eta=0.0, log_every_t=log_every_t) z_denoise_row = z_denoise_row['x_inter'] else: log_every_t = 10 samples, z_denoise_row = model.sample_log(cond=None, batch_size = sample_num, ddim=False, ddim_steps=None, eta=0.0, log_every_t=log_every_t) samples = (samples / model.scaling_factor + 1) / 2. * 255. denoise_row = (th.stack(z_denoise_row) / model.scaling_factor + 1) / 2. * 255 prim_size = config.model.bodydecoder_config.prim_size n_prims_x = n_prims_y = int(config.model.bodydecoder_config.n_prims ** 0.5) # plot denoising row denoise_row = denoise_row.reshape(-1, sample_num, prim_size, 7, n_prims_y, prim_size, n_prims_x, prim_size).permute(0, 1, 4, 6, 3, 2, 5, 7).reshape(-1, sample_num, n_prims_y * n_prims_x, 7, prim_size, prim_size, prim_size) denoise_sample_deltascale = th.mean(denoise_row[:, :, :, 4:], dim=(-1, -2, -3)) / 255. * 20. 
denoise_sample_rgba = denoise_row[:, :, :, :4, :, :, :] num_steps = denoise_row.shape[0] for i in range(sample_num): batch = dataset.sample_cam_smpl() sam_cam = {} sam_cam.update(dataset.inf_cameras[dataset.subject_ids[0]]['camera0000']) for k, v in sam_cam.items(): if isinstance(v, np.ndarray): sam_cam[k] = v[None, ...] batch.update(sam_cam) batch = to_device(batch, device) B = 1 geom = model.bodydecoder.lbs_fn( poses = batch["poses"], shapes = batch["shapes"], Rh = batch["Rh"], Th = batch["Th"], v_template = model.bodydecoder.lbs_fn.v_template[np.newaxis], ) * 1000.0 prim_pos_mesh = ( make_postex(geom, model.bodydecoder.prim_vidx_img, model.bodydecoder.prim_bary_img) .permute(0, 2, 3, 1) .reshape(-1, model.bodydecoder.n_prims, 3) .detach() ) prim_scale_mesh = ( model.bodydecoder.prim_scale[np.newaxis, :, np.newaxis].expand(B, -1, 3).detach().clone() )
tbn = compute_tbn(geom, model.bodydecoder.geo_fn.vt, model.bodydecoder.prim_vidx_img, model.bodydecoder.prim_vtidx_img)
7
2023-12-06 05:12:55+00:00
12k
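In the PrimDiffusion record above, make_postex is a one-line helper with no docstring. The sketch below reproduces its barycentric-interpolation arithmetic on toy tensors so the shapes are explicit; all sizes (B, n_verts, H, W) are invented purely for illustration.

```python
import torch as th

# Toy sketch of the make_postex computation: barycentric interpolation of
# per-vertex positions onto an HxW index/weight image.
B, n_verts, H, W = 1, 5, 2, 2
v = th.randn(B, n_verts, 3)                       # vertex positions
idxim = th.randint(0, n_verts, (H, W, 3))         # per-texel triangle vertex ids
barim = th.rand(H, W, 3)
barim = barim / barim.sum(-1, keepdim=True)       # barycentric weights sum to 1
postex = (
    barim[None, :, :, 0, None] * v[:, idxim[:, :, 0]]
    + barim[None, :, :, 1, None] * v[:, idxim[:, :, 1]]
    + barim[None, :, :, 2, None] * v[:, idxim[:, :, 2]]
).permute(0, 3, 1, 2)
print(postex.shape)  # torch.Size([1, 3, 2, 2]) -- a position image, one xyz per texel
```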
ml-stat-Sustech/TorchCP
tests/test_classification.py
[ { "identifier": "ClassWisePredictor", "path": "torchcp/classification/predictors/classwise.py", "snippet": "class ClassWisePredictor(SplitPredictor):\n \"\"\"\n\n Applications of Class-Conditional Conformal Predictor in Multi-Class Classification (Shi et al., 2013)\n paper: https://ieeexplore.ieee.org/document/6784618\n \n \n :param score_function: non-conformity score function.\n :param model: a pytorch model.\n \"\"\"\n\n def __init__(self, score_function, model=None):\n super(ClassWisePredictor, self).__init__(score_function, model)\n self.q_hat = None\n\n def calculate_threshold(self, logits, labels, alpha):\n if alpha >= 1 or alpha <= 0:\n raise ValueError(\"Significance level 'alpha' must be in (0,1).\")\n logits = logits.to(self._device)\n labels = labels.to(self._device)\n # Count the number of classes\n num_classes = logits.shape[1]\n self.q_hat = torch.zeros(num_classes, device=self._device)\n for label in range(num_classes):\n x_cal_tmp = logits[labels == label]\n y_cal_tmp = labels[labels == label]\n scores = self.score_function(x_cal_tmp, y_cal_tmp)\n self.q_hat[label] = self._calculate_conformal_value(scores, alpha)" }, { "identifier": "ClusterPredictor", "path": "torchcp/classification/predictors/cluster.py", "snippet": "class ClusterPredictor(SplitPredictor):\n \"\"\"\n Class-Conditional Conformal Prediction with Many Classes (Ding et al., 2023).\n paper: https://arxiv.org/abs/2306.09335.\n \n :param score_function: a non-conformity score function.\n :param model: a pytorch model.\n :param ratio_clustering: the ratio of examples in the calibration dataset used to cluster classes.\n :param num_clusters: the number of clusters. If ratio_clustering is \"auto\", the number of clusters is automatically computed.\n :param split: the method to split the dataset into clustering dataset and calibration set. 
Options are 'proportional' (sample proportional to distribution such that rarest class has n_clustering example), 'doubledip' (don't split and use all data for both steps, or 'random' (each example is assigned to clustering step with some fixed probability).\n \"\"\"\n\n def __init__(self, score_function, model=None, ratio_clustering=\"auto\", num_clusters=\"auto\", split='random',\n temperature=1):\n\n super(ClusterPredictor, self).__init__(score_function, model, temperature)\n self.__ratio_clustering = ratio_clustering\n self.__num_clusters = num_clusters\n self.__split = split\n\n def calculate_threshold(self, logits, labels, alpha):\n if alpha >= 1 or alpha <= 0:\n raise ValueError(\"Significance level 'alpha' must be in (0,1).\")\n logits = logits.to(self._device)\n labels = labels.to(self._device)\n num_classes = logits.shape[1]\n scores = self.score_function(logits, labels)\n\n alpha = torch.tensor(alpha, device=self._device)\n classes_statistics = torch.tensor([torch.sum(labels == k).item() for k in range(num_classes)],\n device=self._device)\n\n # 1) Choose necessary parameters for Cluster algorithm\n if self.__ratio_clustering == 'auto' and self.__num_clusters == 'auto':\n n_min = torch.min(classes_statistics)\n n_thresh = self.__get_quantile_minimum(alpha)\n # Classes with fewer than n_thresh examples will be excluded from clustering\n n_min = torch.maximum(n_min, n_thresh)\n num_remaining_classes = torch.sum((classes_statistics >= n_min).float())\n\n # Compute the number of clusters and the minium number of examples for each class\n n_clustering = (n_min * num_remaining_classes / (75 + num_remaining_classes)).clone().to(\n torch.int32).to(self._device)\n self.__num_clusters = torch.floor(n_clustering / 2).to(torch.int32)\n self.__ratio_clustering = n_clustering / n_min\n\n # 2) Split data\n clustering_scores, clustering_labels, cal_scores, cal_labels = self.__split_data(scores,\n labels,\n classes_statistics)\n\n # 3) Filter \"rare\" classes\n rare_classes = self.__get_rare_classes(clustering_labels, alpha, num_classes)\n\n # 4) Run clustering\n if (num_classes - len(rare_classes) > self.__num_clusters) and (self.__num_clusters > 1):\n # Filter out rare classes and re-index\n remaining_idx, filtered_labels, class_remapping = self.__remap_classes(clustering_labels, rare_classes)\n filtered_scores = clustering_scores[remaining_idx]\n\n # Compute embedding for each class and get class counts\n embeddings, class_cts = self.__embed_all_classes(filtered_scores, filtered_labels)\n kmeans = KMeans(n_clusters=int(self.__num_clusters), n_init=10).fit(X=embeddings.detach().cpu().numpy(),\n sample_weight=np.sqrt(\n class_cts.detach().cpu().numpy()))\n nonrare_class_cluster_assignments = torch.tensor(kmeans.labels_, device=self._device)\n\n cluster_assignments = - torch.ones((num_classes,), dtype=torch.int32, device=self._device)\n\n for cls, remapped_cls in class_remapping.items():\n cluster_assignments[cls] = nonrare_class_cluster_assignments[remapped_cls]\n else:\n cluster_assignments = - torch.ones((num_classes,), dtype=torch.int32, device=self._device)\n\n # 5) Compute qhats for each cluster\n\n self.q_hat = self.__compute_cluster_specific_qhats(cluster_assignments,\n cal_scores,\n cal_labels,\n alpha)\n\n def __split_data(self, scores, labels, classes_statistics):\n if self.__split == 'proportional':\n # Split dataset along with fraction \"frac_clustering\"\n num_classes = classes_statistics.shape[0]\n n_k = torch.tensor([self.__ratio_clustering * classes_statistics[k] for k in 
range(num_classes)],\n device=self._device, dtype=torch.int32)\n idx1 = torch.zeros(labels.shape, dtype=torch.bool, device=self._device)\n for k in range(num_classes):\n # Randomly select n instances of class k\n idx = torch.argwhere(labels == k).flatten()\n random_indices = torch.randint(0, classes_statistics[k], (n_k[k],), device=self._device)\n selected_idx = idx[random_indices]\n idx1[selected_idx] = 1\n clustering_scores = scores[idx1]\n clustering_labels = labels[idx1]\n cal_scores = scores[~idx1]\n cal_labels = labels[~idx1]\n\n elif self.__split == 'doubledip':\n clustering_scores, clustering_labels = scores, labels\n cal_scores, cal_labels = scores, labels\n\n elif self.__split == 'random':\n # Each point is assigned to clustering set w.p. frac_clustering \n idx1 = torch.rand(size=(len(labels),), device=self._device) < self.__ratio_clustering\n\n clustering_scores = scores[idx1]\n clustering_labels = labels[idx1]\n cal_scores = scores[~idx1]\n cal_labels = labels[~idx1]\n else:\n raise Exception(\"Invalid split method. Options are 'proportional', 'doubledip', and 'random'\")\n return clustering_scores, clustering_labels, cal_scores, cal_labels\n\n def __get_quantile_minimum(self, alpha):\n \"\"\"\n Compute smallest n such that ceil((n+1)*(1-alpha)/n) <= 1\n \"\"\"\n n = torch.tensor(0, device=alpha.device)\n while torch.ceil((n + 1) * (1 - alpha) / n) > 1:\n n += 1\n return n\n\n def __get_rare_classes(self, labels, alpha, num_classes):\n \"\"\"\n Choose classes whose number is less than or equal to .\n \"\"\"\n thresh = self.__get_quantile_minimum(alpha)\n classes, cts = torch.unique(labels, return_counts=True)\n rare_classes = classes[cts < thresh].to(self._device)\n\n # Also included any classes that are so rare that we have 0 labels for it\n\n all_classes = torch.arange(num_classes, device=self._device)\n zero_ct_classes = all_classes[(all_classes.view(1, -1) != classes.view(-1, 1)).all(dim=0)]\n rare_classes = torch.concatenate((rare_classes, zero_ct_classes))\n\n return rare_classes\n\n def __remap_classes(self, labels, rare_classes):\n \"\"\"\n Exclude classes in rare_classes and remap remaining classes to be 0-indexed\n\n :returns:\n - remaining_idx: Boolean array the same length as labels. 
Entry i is True\n if labels[i] is not in rare_classes\n - remapped_labels : Array that only contains the entries of labels that are\n not in rare_classes (in order)\n - remapping : Dict mapping old class index to new class index\n\n \"\"\"\n labels = labels.detach().cpu().numpy()\n rare_classes = rare_classes.detach().cpu().numpy()\n remaining_idx = ~np.isin(labels, rare_classes)\n\n remaining_labels = labels[remaining_idx]\n remapped_labels = np.zeros(remaining_labels.shape, dtype=int)\n new_idx = 0\n remapping = {}\n for i in range(len(remaining_labels)):\n if remaining_labels[i] in remapping:\n remapped_labels[i] = remapping[remaining_labels[i]]\n else:\n remapped_labels[i] = new_idx\n remapping[remaining_labels[i]] = new_idx\n new_idx += 1\n\n return torch.from_numpy(remaining_idx).to(self._device), torch.tensor(remapped_labels,\n device=self._device), remapping\n\n def __embed_all_classes(self, scores_all, labels, q=[0.5, 0.6, 0.7, 0.8, 0.9]):\n \"\"\"\n :param scores_all: num_instances-length array where scores_all[i] = score of true class for instance i.\n :param labels: num_instances-length array of true class labels.\n :param q: quantiles to include in embedding.\n\n :returns:\n - embeddings: num_classes x len(q) array where ith row is the embeddings of class i.\n - cts: num_classes-length array where cts[i] = # of times class i appears in labels .\n \"\"\"\n num_classes = len(torch.unique(labels))\n embeddings = torch.zeros((num_classes, len(q)), device=self._device)\n cts = torch.zeros((num_classes,), device=self._device)\n\n for i in range(num_classes):\n if len(scores_all.shape) > 1:\n raise DimensionError(f\"Expected 1-dimension, but got {len(scores_all.shape)}-dimension.\")\n\n class_i_scores = scores_all[labels == i]\n\n cts[i] = class_i_scores.shape[0]\n # Computes the q-quantiles of samples and returns the vector of quantiles\n embeddings[i, :] = torch.quantile(class_i_scores, torch.tensor(q, device=self._device))\n\n return embeddings, cts\n\n def __compute_cluster_specific_qhats(self, cluster_assignments, cal_class_scores, cal_true_labels, alpha):\n '''\n Computes cluster-specific quantiles (one for each class) that will result in marginal coverage of (1-alpha)\n \n :param cluster_assignments: num_classes length array where entry i is the index of the cluster that class i belongs to. Rare classes can be assigned to cluster -1 and they will automatically be given as default_qhat. 
\n :param cal_class_scores: cal_class_scores[i] is the score for instance i.\n :param cal_true_labels: true class labels for instances\n :param alpha: Desired coverage level\n\n\n :return : num_classes length array where entry i is the quantile correspond to the cluster that class i belongs to.\n '''\n\n # Map true class labels to clusters\n cal_true_clusters = torch.tensor([cluster_assignments[label] for label in cal_true_labels], device=self._device)\n num_clusters = torch.max(cluster_assignments) + 1\n \n cluster_qhats = self.__compute_class_specific_qhats(cal_class_scores, cal_true_clusters, num_clusters, alpha)\n # Map cluster qhats back to classes\n num_classes = len(cluster_assignments)\n qhats_class = torch.tensor([cluster_qhats[cluster_assignments[k]] for k in range(num_classes)],\n device=self._device)\n\n return qhats_class\n\n def __compute_class_specific_qhats(self, cal_class_scores, cal_true_clusters, num_clusters, alpha):\n '''\n Computes class-specific quantiles (one for each class) that will result in marginal coverage of (1-alpha)\n \n :param cal_class_scores: num_instances-length array where cal_class_scores[i] is the score for instance i\n :param cal_true_clusters: num_instances-length array of true class labels. If class -1 appears, it will be assigned the null_qhat value. It is appended as an extra entry of the returned q_hats so that q_hats[-1] = null_qhat.\n :param num_clusters: the number of clusters.\n :param alpha: Desired coverage level.\n\n :return: the threshold of each class\n '''\n\n # Compute quantile q_hat that will result in marginal coverage of (1-alpha)\n null_qhat = self._calculate_conformal_value(cal_class_scores, alpha)\n\n q_hats = torch.zeros((num_clusters,), device=self._device) # q_hats[i] = quantile for class i\n for k in range(num_clusters):\n # Only select data for which k is true class\n idx = (cal_true_clusters == k)\n scores = cal_class_scores[idx]\n q_hats[k] = self._calculate_conformal_value(scores, alpha)\n if -1 in cal_true_clusters:\n q_hats = torch.concatenate((q_hats, torch.tensor([null_qhat], device=self._device)))\n\n return q_hats" }, { "identifier": "SplitPredictor", "path": "torchcp/classification/predictors/split.py", "snippet": "class SplitPredictor(BasePredictor):\n \"\"\"\n Split Conformal Prediction (Vovk et a., 2005).\n Book: https://link.springer.com/book/10.1007/978-3-031-06649-8.\n \n :param score_function: non-conformity score function.\n :param model: a pytorch model.\n :param temperature: the temperature of Temperature Scaling.\n \"\"\"\n def __init__(self, score_function, model=None, temperature=1):\n super().__init__(score_function, model, temperature)\n\n #############################\n # The calibration process\n ############################\n def calibrate(self, cal_dataloader, alpha):\n self._model.eval()\n logits_list = []\n labels_list = []\n with torch.no_grad():\n for examples in cal_dataloader:\n tmp_x, tmp_labels = examples[0].to(self._device), examples[1].to(self._device)\n tmp_logits = self._logits_transformation(self._model(tmp_x)).detach()\n logits_list.append(tmp_logits)\n labels_list.append(tmp_labels)\n logits = torch.cat(logits_list).float()\n labels = torch.cat(labels_list)\n self.calculate_threshold(logits, labels, alpha)\n\n def calculate_threshold(self, logits, labels, alpha):\n if alpha >= 1 or alpha <= 0:\n raise ValueError(\"Significance level 'alpha' must be in (0,1).\")\n logits = logits.to(self._device)\n labels = labels.to(self._device)\n scores = self.score_function(logits, labels)\n 
self.q_hat = self._calculate_conformal_value(scores, alpha)\n\n def _calculate_conformal_value(self, scores, alpha):\n \"\"\"\n Calculate the 1-alpha quantile of scores.\n \n :param scores: non-conformity scores.\n :param alpha: a significance level.\n \n :return: the threshold which is use to construct prediction sets.\n \"\"\"\n if len(scores) == 0:\n warnings.warn(\n \"The number of scores is 0, which is a invalid scores. To avoid program crash, the threshold is set as torch.inf.\")\n return torch.inf\n qunatile_value = math.ceil(scores.shape[0] + 1) * (1 - alpha) / scores.shape[0]\n\n if qunatile_value > 1:\n warnings.warn(\n \"The value of quantile exceeds 1. It should be a value in (0,1). To avoid program crash, the threshold is set as torch.inf.\")\n return torch.inf\n\n return torch.quantile(scores, qunatile_value).to(self._device)\n\n #############################\n # The prediction process\n ############################\n def predict(self, x_batch):\n \"\"\"\n The input of score function is softmax probability.\n\n :param x_batch: a batch of instances.\n \"\"\"\n self._model.eval()\n if self._model != None:\n x_batch = self._model(x_batch.to(self._device)).float()\n x_batch = self._logits_transformation(x_batch).detach()\n sets = self.predict_with_logits(x_batch)\n return sets\n\n def predict_with_logits(self, logits, q_hat=None):\n \"\"\"\n The input of score function is softmax probability.\n if q_hat is not given by the function 'self.calibrate', the construction progress of prediction set is a naive method.\n\n :param logits: model output before softmax.\n :param q_hat: the conformal threshold.\n\n :return: prediction sets\n \"\"\"\n scores = self.score_function(logits).to(self._device)\n if q_hat is None:\n S = self._generate_prediction_set(scores, self.q_hat)\n else:\n S = self._generate_prediction_set(scores, q_hat)\n return S\n\n #############################\n # The evaluation process\n ############################\n\n def evaluate(self, val_dataloader):\n prediction_sets = []\n labels_list = []\n with torch.no_grad():\n for examples in val_dataloader:\n tmp_x, tmp_label = examples[0].to(self._device), examples[1].to(self._device)\n prediction_sets_batch = self.predict(tmp_x)\n prediction_sets.extend(prediction_sets_batch)\n labels_list.append(tmp_label)\n val_labels = torch.cat(labels_list)\n\n res_dict = {\"Coverage_rate\": self._metric('coverage_rate')(prediction_sets, val_labels),\n \"Average_size\": self._metric('average_size')(prediction_sets, val_labels)}\n return res_dict" }, { "identifier": "APS", "path": "torchcp/classification/scores/aps.py", "snippet": "class APS(BaseScore):\n \"\"\"\n Adaptive Prediction Sets (Romano et al., 2020)\n paper :https://proceedings.neurips.cc/paper/2020/file/244edd7e85dc81602b7615cd705545f5-Paper.pdf\n \"\"\"\n\n def __call__(self, logits, label=None):\n assert len(logits.shape) <= 2, \"The dimension of logits must be less than 2.\"\n if len(logits.shape) == 1:\n logits = logits.unsqueeze(0)\n probs = torch.softmax(logits, dim=-1)\n if label is None:\n return self._calculate_all_label(probs)\n else:\n return self._calculate_single_label(probs, label)\n\n def _calculate_all_label(self, probs):\n indices, ordered, cumsum = self._sort_sum(probs)\n U = torch.rand(probs.shape, device=probs.device)\n ordered_scores = cumsum - ordered * U\n _, sorted_indices = torch.sort(indices, descending=False, dim=-1)\n scores = ordered_scores.gather(dim=-1, index=sorted_indices)\n return scores\n\n def _sort_sum(self, probs):\n # ordered: the 
ordered probabilities in descending order\n # indices: the rank of ordered probabilities in descending order\n # cumsum: the accumulation of sorted probabilities\n ordered, indices = torch.sort(probs, dim=-1, descending=True)\n cumsum = torch.cumsum(ordered, dim=-1)\n return indices, ordered, cumsum\n\n def _calculate_single_label(self, probs, label):\n indices, ordered, cumsum = self._sort_sum(probs)\n U = torch.rand(indices.shape[0], device=probs.device)\n idx = torch.where(indices == label.view(-1, 1))\n scores_first_rank = U * cumsum[idx]\n idx_minus_one = (idx[0], idx[1] - 1)\n scores_usual = U * ordered[idx] + cumsum[idx_minus_one]\n return torch.where(idx[1] == 0, scores_first_rank, scores_usual)" }, { "identifier": "Margin", "path": "torchcp/classification/scores/margin.py", "snippet": "class Margin(APS):\n\n def __init__(self, ) -> None:\n pass\n\n def _calculate_single_label(self, probs, label):\n row_indices = torch.arange(probs.size(0), device=probs.device)\n target_prob = probs[row_indices, label].clone()\n probs[row_indices, label] = -1\n second_highest_prob = torch.max(probs, dim=-1).values\n return second_highest_prob - target_prob\n\n def _calculate_all_label(self, probs):\n temp_probs = probs.unsqueeze(1).repeat(1, probs.shape[1], 1)\n indices = torch.arange(probs.shape[1]).to(probs.device)\n temp_probs[None, indices, indices] = torch.finfo(torch.float32).min\n scores = torch.max(temp_probs, dim=-1).values - probs\n return scores" }, { "identifier": "RAPS", "path": "torchcp/classification/scores/raps.py", "snippet": "class RAPS(APS):\n \"\"\"\n Regularized Adaptive Prediction Sets (Angelopoulos et al., 2020)\n paper : https://arxiv.org/abs/2009.14193\n \n :param penalty: the weight of regularization. When penalty = 0, RAPS=APS.\n :param kreg: the rank of regularization which is an integer in [0,labels_num].\n \"\"\"\n\n def __init__(self, penalty, kreg=0):\n \n if penalty <= 0:\n raise ValueError(\"The parameter 'penalty' must be a positive value.\")\n if kreg < 0:\n raise ValueError(\"The parameter 'kreg' must be a nonnegative value.\")\n if type(kreg) != int:\n raise TypeError(\"The parameter 'kreg' must be a integer.\")\n super(RAPS, self).__init__()\n self.__penalty = penalty\n self.__kreg = kreg\n\n def _calculate_all_label(self, probs):\n indices, ordered, cumsum = self._sort_sum(probs)\n U = torch.rand(probs.shape, device=probs.device)\n reg = torch.maximum(self.__penalty * (torch.arange(1, probs.shape[-1] + 1, device=probs.device) - self.__kreg),\n torch.tensor(0, device=probs.device))\n ordered_scores = cumsum - ordered * U + reg\n _, sorted_indices = torch.sort(indices, descending=False, dim=-1)\n scores = ordered_scores.gather(dim=-1, index=sorted_indices)\n return scores\n \n def _calculate_single_label(self, probs, label):\n indices, ordered, cumsum = self._sort_sum(probs)\n U = torch.rand(indices.shape[0], device=probs.device)\n idx = torch.where(indices == label.view(-1, 1))\n reg = torch.maximum(self.__penalty * (idx[1] + 1 - self.__kreg), torch.tensor(0).to(probs.device))\n scores_first_rank = U * ordered[idx] + reg\n idx_minus_one = (idx[0], idx[1] - 1)\n scores_usual = U * ordered[idx] + cumsum[idx_minus_one] + reg\n return torch.where(idx[1] == 0, scores_first_rank, scores_usual)" }, { "identifier": "SAPS", "path": "torchcp/classification/scores/saps.py", "snippet": "class SAPS(APS):\n \"\"\"\n Sorted Adaptive Prediction Sets (Huang et al., 2023)\n paper: https://arxiv.org/abs/2310.06430\n \n :param weight: the weight of label ranking.\n \"\"\"\n\n def 
__init__(self, weight):\n\n super(SAPS, self).__init__()\n if weight <= 0:\n raise ValueError(\"The parameter 'weight' must be a positive value.\")\n self.__weight = weight\n\n def _calculate_all_label(self, probs):\n indices, ordered, cumsum = self._sort_sum(probs)\n ordered[:, 1:] = self.__weight\n cumsum = torch.cumsum(ordered, dim=-1)\n U = torch.rand(probs.shape, device=probs.device)\n ordered_scores = cumsum - ordered * U\n _, sorted_indices = torch.sort(indices, descending=False, dim=-1)\n scores = ordered_scores.gather(dim=-1, index=sorted_indices)\n return scores\n\n def _calculate_single_label(self, probs, label):\n indices, ordered, cumsum = self._sort_sum(probs)\n U = torch.rand(indices.shape[0], device=probs.device)\n idx = torch.where(indices == label.view(-1, 1))\n scores_first_rank = U * cumsum[idx]\n scores_usual = self.__weight * (idx[1] - U) + ordered[:, 0]\n return torch.where(idx[1] == 0, scores_first_rank, scores_usual)" }, { "identifier": "THR", "path": "torchcp/classification/scores/thr.py", "snippet": "class THR(BaseScore):\n \"\"\"\n Threshold conformal predictors (Sadinle et al., 2016).\n paper : https://arxiv.org/abs/1609.00451.\n \n :param score_type: a transformation on logits. Default: \"softmax\". Optional: \"softmax\", \"Identity\", \"log_softmax\" or \"log\".\n \"\"\"\n\n def __init__(self, score_type=\"softmax\") -> None:\n \n super().__init__()\n self.score_type = score_type\n if score_type == \"Identity\":\n self.transform = lambda x: x\n elif score_type == \"softmax\":\n self.transform = lambda x: torch.softmax(x, dim=- 1)\n elif score_type == \"log_softmax\":\n self.transform = lambda x: torch.log_softmax(x, dim=-1)\n elif score_type == \"log\":\n self.transform = lambda x: torch.log(x)\n else:\n raise NotImplementedError\n\n def __call__(self, logits, label=None):\n assert len(logits.shape) <= 2, \"The dimension of logits must be less than 2.\"\n if len(logits.shape) == 1:\n logits = logits.unsqueeze(0)\n temp_values = self.transform(logits)\n if label is None:\n return self.__calculate_all_label(temp_values)\n else:\n return self.__calculate_single_label(temp_values, label)\n\n def __calculate_single_label(self, temp_values, label):\n return 1 - temp_values[torch.arange(label.shape[0], device=temp_values.device), label]\n\n def __calculate_all_label(self, temp_values):\n return 1 - temp_values" }, { "identifier": "Metrics", "path": "torchcp/classification/utils/metrics.py", "snippet": "class Metrics:\n\n def __call__(self, metric) -> Any:\n if metric not in METRICS_REGISTRY_CLASSIFICATION.registered_names():\n raise NameError(f\"The metric: {metric} is not defined in TorchCP.\")\n return METRICS_REGISTRY_CLASSIFICATION.get(metric)" }, { "identifier": "fix_randomness", "path": "torchcp/utils/common.py", "snippet": "def fix_randomness(seed=0):\n \"\"\"\n Fix the random seed for python, torch, numpy.\n\n :param seed: the random seed\n \"\"\"\n np.random.seed(seed=seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n random.seed(seed)" } ]
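The SplitPredictor snippet above derives its threshold as a (1 - alpha) quantile of the calibration scores. Below is a minimal standalone sketch of that calibration step, using the standard finite-sample correction ceil((n + 1) * (1 - alpha)) / n; note that the snippet's own expression places the ceil() slightly differently, and the toy scores and alpha value here are made up purely for illustration.

# Minimal sketch of the split-conformal calibration step described in the
# SplitPredictor snippet above. Toy scores and alpha are illustrative only.
import math
import torch

def conformal_threshold(scores: torch.Tensor, alpha: float) -> torch.Tensor:
    # Finite-sample corrected quantile level: ceil((n + 1) * (1 - alpha)) / n
    n = scores.shape[0]
    level = math.ceil((n + 1) * (1 - alpha)) / n
    if level > 1:
        # Too few calibration points for this alpha; fall back to an infinite threshold
        return torch.tensor(float("inf"))
    return torch.quantile(scores, level)

cal_scores = torch.rand(1000)              # non-conformity scores on the calibration split
q_hat = conformal_threshold(cal_scores, alpha=0.1)
# At test time, a label enters the prediction set iff its non-conformity score is <= q_hat.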
import argparse
import os
import pickle
import torch
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as trn
from tqdm import tqdm
from torchcp.classification.predictors import SplitPredictor, ClusterPredictor, ClassWisePredictor
from torchcp.classification.scores import THR, APS, SAPS, RAPS, Margin
from torchcp.classification.utils.metrics import Metrics
from torchcp.utils import fix_randomness
7,327
# Copyright (c) 2023-present, SUSTech-ML. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # transform = trn.Compose([trn.Resize(256), trn.CenterCrop(224), trn.ToTensor(), trn.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def test_imagenet_logits(): ####################################### # Loading ImageNet dataset and a pytorch model ####################################### fix_randomness(seed=0) model_name = 'ResNet101' fname = ".cache/" + model_name + ".pkl" if os.path.exists(fname): with open(fname, 'rb') as handle: dataset = pickle.load(handle) else: usr_dir = os.path.expanduser('~') data_dir = os.path.join(usr_dir, "data") dataset = dset.ImageFolder(data_dir + "/imagenet/val", transform) data_loader = torch.utils.data.DataLoader(dataset, batch_size=320, shuffle=False, pin_memory=True) # load model model = torchvision.models.resnet101(weights="IMAGENET1K_V1", progress=True) logits_list = [] labels_list = [] with torch.no_grad(): for examples in tqdm(data_loader): tmp_x, tmp_label = examples[0], examples[1] tmp_logits = model(tmp_x) logits_list.append(tmp_logits) labels_list.append(tmp_label) logits = torch.cat(logits_list) labels = torch.cat(labels_list) dataset = torch.utils.data.TensorDataset(logits, labels.long()) with open(fname, 'wb') as handle: pickle.dump(dataset, handle, protocol=pickle.HIGHEST_PROTOCOL) cal_data, val_data = torch.utils.data.random_split(dataset, [25000, 25000]) cal_logits = torch.stack([sample[0] for sample in cal_data]) cal_labels = torch.stack([sample[1] for sample in cal_data]) test_logits = torch.stack([sample[0] for sample in val_data]) test_labels = torch.stack([sample[1] for sample in val_data]) num_classes = 1000 ####################################### # A standard process of conformal prediction ####################################### alpha = 0.1 predictors = [SplitPredictor, ClassWisePredictor, ClusterPredictor]
# Copyright (c) 2023-present, SUSTech-ML. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # transform = trn.Compose([trn.Resize(256), trn.CenterCrop(224), trn.ToTensor(), trn.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def test_imagenet_logits(): ####################################### # Loading ImageNet dataset and a pytorch model ####################################### fix_randomness(seed=0) model_name = 'ResNet101' fname = ".cache/" + model_name + ".pkl" if os.path.exists(fname): with open(fname, 'rb') as handle: dataset = pickle.load(handle) else: usr_dir = os.path.expanduser('~') data_dir = os.path.join(usr_dir, "data") dataset = dset.ImageFolder(data_dir + "/imagenet/val", transform) data_loader = torch.utils.data.DataLoader(dataset, batch_size=320, shuffle=False, pin_memory=True) # load model model = torchvision.models.resnet101(weights="IMAGENET1K_V1", progress=True) logits_list = [] labels_list = [] with torch.no_grad(): for examples in tqdm(data_loader): tmp_x, tmp_label = examples[0], examples[1] tmp_logits = model(tmp_x) logits_list.append(tmp_logits) labels_list.append(tmp_label) logits = torch.cat(logits_list) labels = torch.cat(labels_list) dataset = torch.utils.data.TensorDataset(logits, labels.long()) with open(fname, 'wb') as handle: pickle.dump(dataset, handle, protocol=pickle.HIGHEST_PROTOCOL) cal_data, val_data = torch.utils.data.random_split(dataset, [25000, 25000]) cal_logits = torch.stack([sample[0] for sample in cal_data]) cal_labels = torch.stack([sample[1] for sample in cal_data]) test_logits = torch.stack([sample[0] for sample in val_data]) test_labels = torch.stack([sample[1] for sample in val_data]) num_classes = 1000 ####################################### # A standard process of conformal prediction ####################################### alpha = 0.1 predictors = [SplitPredictor, ClassWisePredictor, ClusterPredictor]
score_functions = [THR(), APS(), RAPS(1, 0), SAPS(0.2), Margin()]
7
2023-12-06 09:08:41+00:00
12k
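The row above ends right before its next_line (the score_functions list). The sketch below is one plausible continuation of the benchmark loop inside test_imagenet_logits(), assembled only from APIs shown in this row's context snippets (calculate_threshold, predict_with_logits, Metrics); the actual continuation in the source file may differ, and the single-argument constructors for ClassWisePredictor and ClusterPredictor are an assumption.

# Plausible continuation of test_imagenet_logits() above; a sketch, not the
# file's exact code. Uses only APIs shown in this row's context snippets.
score_functions = [THR(), APS(), RAPS(1, 0), SAPS(0.2), Margin()]   # this row's next_line
metrics = Metrics()

for score in score_functions:
    for predictor_cls in predictors:
        predictor = predictor_cls(score)
        # Calibrate the conformal threshold on the calibration logits
        predictor.calculate_threshold(cal_logits, cal_labels, alpha)
        # Build prediction sets on the evaluation split
        prediction_sets = predictor.predict_with_logits(test_logits)
        coverage = metrics("coverage_rate")(prediction_sets, test_labels)
        avg_size = metrics("average_size")(prediction_sets, test_labels)
        print(type(score).__name__, predictor_cls.__name__, coverage, avg_size)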
OpenDriveLab/LaneSegNet
projects/lanesegnet/models/modules/bevformer_constructer.py
[ { "identifier": "BEV_CONSTRUCTOR", "path": "projects/lanesegnet/utils/builder.py", "snippet": "BEV_CONSTRUCTOR = Registry('BEV Constructor')" }, { "identifier": "TemporalSelfAttention", "path": "projects/bevformer/modules/temporal_self_attention.py", "snippet": "class TemporalSelfAttention(BaseModule):\r\n \"\"\"An attention module used in BEVFormer based on Deformable-Detr.\r\n\r\n `Deformable DETR: Deformable Transformers for End-to-End Object Detection.\r\n <https://arxiv.org/pdf/2010.04159.pdf>`_.\r\n\r\n Args:\r\n embed_dims (int): The embedding dimension of Attention.\r\n Default: 256.\r\n num_heads (int): Parallel attention heads. Default: 64.\r\n num_levels (int): The number of feature map used in\r\n Attention. Default: 4.\r\n num_points (int): The number of sampling points for\r\n each query in each head. Default: 4.\r\n im2col_step (int): The step used in image_to_column.\r\n Default: 64.\r\n dropout (float): A Dropout layer on `inp_identity`.\r\n Default: 0.1.\r\n batch_first (bool): Key, Query and Value are shape of\r\n (batch, n, embed_dim)\r\n or (n, batch, embed_dim). Default to True.\r\n norm_cfg (dict): Config dict for normalization layer.\r\n Default: None.\r\n init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.\r\n Default: None.\r\n num_bev_queue (int): In this version, we only use one history BEV and one currenct BEV.\r\n the length of BEV queue is 2.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n embed_dims=256,\r\n num_heads=8,\r\n num_levels=4,\r\n num_points=4,\r\n num_bev_queue=2,\r\n im2col_step=64,\r\n dropout=0.1,\r\n batch_first=True,\r\n norm_cfg=None,\r\n init_cfg=None):\r\n\r\n super().__init__(init_cfg)\r\n if embed_dims % num_heads != 0:\r\n raise ValueError(f'embed_dims must be divisible by num_heads, '\r\n f'but got {embed_dims} and {num_heads}')\r\n dim_per_head = embed_dims // num_heads\r\n self.norm_cfg = norm_cfg\r\n self.dropout = nn.Dropout(dropout)\r\n self.batch_first = batch_first\r\n self.fp16_enabled = False\r\n\r\n # you'd better set dim_per_head to a power of 2\r\n # which is more efficient in the CUDA implementation\r\n def _is_power_of_2(n):\r\n if (not isinstance(n, int)) or (n < 0):\r\n raise ValueError(\r\n 'invalid input for _is_power_of_2: {} (type: {})'.format(\r\n n, type(n)))\r\n return (n & (n - 1) == 0) and n != 0\r\n\r\n if not _is_power_of_2(dim_per_head):\r\n warnings.warn(\r\n \"You'd better set embed_dims in \"\r\n 'MultiScaleDeformAttention to make '\r\n 'the dimension of each attention head a power of 2 '\r\n 'which is more efficient in our CUDA implementation.')\r\n\r\n self.im2col_step = im2col_step\r\n self.embed_dims = embed_dims\r\n self.num_levels = num_levels\r\n self.num_heads = num_heads\r\n self.num_points = num_points\r\n self.num_bev_queue = num_bev_queue\r\n self.sampling_offsets = nn.Linear(\r\n embed_dims*self.num_bev_queue, num_bev_queue*num_heads * num_levels * num_points * 2)\r\n self.attention_weights = nn.Linear(embed_dims*self.num_bev_queue,\r\n num_bev_queue*num_heads * num_levels * num_points)\r\n self.value_proj = nn.Linear(embed_dims, embed_dims)\r\n self.output_proj = nn.Linear(embed_dims, embed_dims)\r\n self.init_weights()\r\n\r\n def init_weights(self):\r\n \"\"\"Default initialization for Parameters of Module.\"\"\"\r\n constant_init(self.sampling_offsets, 0.)\r\n thetas = torch.arange(\r\n self.num_heads,\r\n dtype=torch.float32) * (2.0 * math.pi / self.num_heads)\r\n grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)\r\n grid_init = (grid_init /\r\n grid_init.abs().max(-1, 
keepdim=True)[0]).view(\r\n self.num_heads, 1, 1,\r\n 2).repeat(1, self.num_levels*self.num_bev_queue, self.num_points, 1)\r\n\r\n for i in range(self.num_points):\r\n grid_init[:, :, i, :] *= i + 1\r\n\r\n self.sampling_offsets.bias.data = grid_init.view(-1)\r\n constant_init(self.attention_weights, val=0., bias=0.)\r\n xavier_init(self.value_proj, distribution='uniform', bias=0.)\r\n xavier_init(self.output_proj, distribution='uniform', bias=0.)\r\n self._is_init = True\r\n\r\n def forward(self,\r\n query,\r\n key=None,\r\n value=None,\r\n identity=None,\r\n query_pos=None,\r\n key_padding_mask=None,\r\n reference_points=None,\r\n spatial_shapes=None,\r\n level_start_index=None,\r\n flag='decoder',\r\n\r\n **kwargs):\r\n \"\"\"Forward Function of MultiScaleDeformAttention.\r\n\r\n Args:\r\n query (Tensor): Query of Transformer with shape\r\n (num_query, bs, embed_dims).\r\n key (Tensor): The key tensor with shape\r\n `(num_key, bs, embed_dims)`.\r\n value (Tensor): The value tensor with shape\r\n `(num_key, bs, embed_dims)`.\r\n identity (Tensor): The tensor used for addition, with the\r\n same shape as `query`. Default None. If None,\r\n `query` will be used.\r\n query_pos (Tensor): The positional encoding for `query`.\r\n Default: None.\r\n key_pos (Tensor): The positional encoding for `key`. Default\r\n None.\r\n reference_points (Tensor): The normalized reference\r\n points with shape (bs, num_query, num_levels, 2),\r\n all elements is range in [0, 1], top-left (0,0),\r\n bottom-right (1, 1), including padding area.\r\n or (N, Length_{query}, num_levels, 4), add\r\n additional two dimensions is (w, h) to\r\n form reference boxes.\r\n key_padding_mask (Tensor): ByteTensor for `query`, with\r\n shape [bs, num_key].\r\n spatial_shapes (Tensor): Spatial shape of features in\r\n different levels. 
With shape (num_levels, 2),\r\n last dimension represents (h, w).\r\n level_start_index (Tensor): The start index of each level.\r\n A tensor has shape ``(num_levels, )`` and can be represented\r\n as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].\r\n\r\n Returns:\r\n Tensor: forwarded results with shape [num_query, bs, embed_dims].\r\n \"\"\"\r\n\r\n if value is None:\r\n assert self.batch_first\r\n bs, len_bev, c = query.shape\r\n value = torch.stack([query, query], 1).reshape(bs*2, len_bev, c)\r\n\r\n # value = torch.cat([query, query], 0)\r\n\r\n if identity is None:\r\n identity = query\r\n if query_pos is not None:\r\n query = query + query_pos\r\n if not self.batch_first:\r\n # change to (bs, num_query ,embed_dims)\r\n query = query.permute(1, 0, 2)\r\n value = value.permute(1, 0, 2)\r\n bs, num_query, embed_dims = query.shape\r\n _, num_value, _ = value.shape\r\n assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value\r\n assert self.num_bev_queue == 2\r\n\r\n query = torch.cat([value[:bs], query], -1)\r\n value = self.value_proj(value)\r\n\r\n if key_padding_mask is not None:\r\n value = value.masked_fill(key_padding_mask[..., None], 0.0)\r\n\r\n value = value.reshape(bs*self.num_bev_queue,\r\n num_value, self.num_heads, -1)\r\n\r\n sampling_offsets = self.sampling_offsets(query)\r\n sampling_offsets = sampling_offsets.view(\r\n bs, num_query, self.num_heads, self.num_bev_queue, self.num_levels, self.num_points, 2)\r\n attention_weights = self.attention_weights(query).view(\r\n bs, num_query, self.num_heads, self.num_bev_queue, self.num_levels * self.num_points)\r\n attention_weights = attention_weights.softmax(-1)\r\n\r\n attention_weights = attention_weights.view(bs, num_query,\r\n self.num_heads,\r\n self.num_bev_queue,\r\n self.num_levels,\r\n self.num_points)\r\n\r\n attention_weights = attention_weights.permute(0, 3, 1, 2, 4, 5)\\\r\n .reshape(bs*self.num_bev_queue, num_query, self.num_heads, self.num_levels, self.num_points).contiguous()\r\n sampling_offsets = sampling_offsets.permute(0, 3, 1, 2, 4, 5, 6)\\\r\n .reshape(bs*self.num_bev_queue, num_query, self.num_heads, self.num_levels, self.num_points, 2)\r\n\r\n if reference_points.shape[-1] == 2:\r\n offset_normalizer = torch.stack(\r\n [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)\r\n sampling_locations = reference_points[:, :, None, :, None, :] \\\r\n + sampling_offsets \\\r\n / offset_normalizer[None, None, None, :, None, :]\r\n\r\n elif reference_points.shape[-1] == 4:\r\n sampling_locations = reference_points[:, :, None, :, None, :2] \\\r\n + sampling_offsets / self.num_points \\\r\n * reference_points[:, :, None, :, None, 2:] \\\r\n * 0.5\r\n else:\r\n raise ValueError(\r\n f'Last dim of reference_points must be'\r\n f' 2 or 4, but get {reference_points.shape[-1]} instead.')\r\n if torch.cuda.is_available() and value.is_cuda:\r\n\r\n # using fp16 deformable attention is unstable because it performs many sum operations\r\n if value.dtype == torch.float16:\r\n MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32\r\n else:\r\n MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32\r\n output = MultiScaleDeformableAttnFunction.apply(\r\n value, spatial_shapes, level_start_index, sampling_locations,\r\n attention_weights, self.im2col_step)\r\n else:\r\n\r\n output = multi_scale_deformable_attn_pytorch(\r\n value, spatial_shapes, sampling_locations, attention_weights)\r\n\r\n # output shape (bs*num_bev_queue, num_query, embed_dims)\r\n # (bs*num_bev_queue, num_query, 
embed_dims)-> (num_query, embed_dims, bs*num_bev_queue)\r\n output = output.permute(1, 2, 0)\r\n\r\n # fuse history value and current value\r\n # (num_query, embed_dims, bs*num_bev_queue)-> (num_query, embed_dims, bs, num_bev_queue)\r\n output = output.view(num_query, embed_dims, bs, self.num_bev_queue)\r\n output = output.mean(-1)\r\n\r\n # (num_query, embed_dims, bs)-> (bs, num_query, embed_dims)\r\n output = output.permute(2, 0, 1)\r\n\r\n output = self.output_proj(output)\r\n\r\n if not self.batch_first:\r\n output = output.permute(1, 0, 2)\r\n\r\n return self.dropout(output) + identity\r" }, { "identifier": "MSDeformableAttention3D", "path": "projects/bevformer/modules/spatial_cross_attention.py", "snippet": "class MSDeformableAttention3D(BaseModule):\r\n \"\"\"An attention module used in BEVFormer based on Deformable-Detr.\r\n `Deformable DETR: Deformable Transformers for End-to-End Object Detection.\r\n <https://arxiv.org/pdf/2010.04159.pdf>`_.\r\n Args:\r\n embed_dims (int): The embedding dimension of Attention.\r\n Default: 256.\r\n num_heads (int): Parallel attention heads. Default: 64.\r\n num_levels (int): The number of feature map used in\r\n Attention. Default: 4.\r\n num_points (int): The number of sampling points for\r\n each query in each head. Default: 4.\r\n im2col_step (int): The step used in image_to_column.\r\n Default: 64.\r\n dropout (float): A Dropout layer on `inp_identity`.\r\n Default: 0.1.\r\n batch_first (bool): Key, Query and Value are shape of\r\n (batch, n, embed_dim)\r\n or (n, batch, embed_dim). Default to False.\r\n norm_cfg (dict): Config dict for normalization layer.\r\n Default: None.\r\n init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.\r\n Default: None.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n embed_dims=256,\r\n num_heads=8,\r\n num_levels=4,\r\n num_points=8,\r\n im2col_step=64,\r\n dropout=0.1,\r\n batch_first=True,\r\n norm_cfg=None,\r\n init_cfg=None):\r\n super().__init__(init_cfg)\r\n if embed_dims % num_heads != 0:\r\n raise ValueError(f'embed_dims must be divisible by num_heads, '\r\n f'but got {embed_dims} and {num_heads}')\r\n dim_per_head = embed_dims // num_heads\r\n self.norm_cfg = norm_cfg\r\n self.batch_first = batch_first\r\n self.output_proj = None\r\n self.fp16_enabled = False\r\n\r\n # you'd better set dim_per_head to a power of 2\r\n # which is more efficient in the CUDA implementation\r\n def _is_power_of_2(n):\r\n if (not isinstance(n, int)) or (n < 0):\r\n raise ValueError(\r\n 'invalid input for _is_power_of_2: {} (type: {})'.format(\r\n n, type(n)))\r\n return (n & (n - 1) == 0) and n != 0\r\n\r\n if not _is_power_of_2(dim_per_head):\r\n warnings.warn(\r\n \"You'd better set embed_dims in \"\r\n 'MultiScaleDeformAttention to make '\r\n 'the dimension of each attention head a power of 2 '\r\n 'which is more efficient in our CUDA implementation.')\r\n\r\n self.im2col_step = im2col_step\r\n self.embed_dims = embed_dims\r\n self.num_levels = num_levels\r\n self.num_heads = num_heads\r\n self.num_points = num_points\r\n self.sampling_offsets = nn.Linear(\r\n embed_dims, num_heads * num_levels * num_points * 2)\r\n self.attention_weights = nn.Linear(embed_dims,\r\n num_heads * num_levels * num_points)\r\n self.value_proj = nn.Linear(embed_dims, embed_dims)\r\n\r\n self.init_weights()\r\n\r\n def init_weights(self):\r\n \"\"\"Default initialization for Parameters of Module.\"\"\"\r\n constant_init(self.sampling_offsets, 0.)\r\n thetas = torch.arange(\r\n self.num_heads,\r\n dtype=torch.float32) * (2.0 * math.pi / 
self.num_heads)\r\n grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)\r\n grid_init = (grid_init /\r\n grid_init.abs().max(-1, keepdim=True)[0]).view(\r\n self.num_heads, 1, 1,\r\n 2).repeat(1, self.num_levels, self.num_points, 1)\r\n for i in range(self.num_points):\r\n grid_init[:, :, i, :] *= i + 1\r\n\r\n self.sampling_offsets.bias.data = grid_init.view(-1)\r\n constant_init(self.attention_weights, val=0., bias=0.)\r\n xavier_init(self.value_proj, distribution='uniform', bias=0.)\r\n xavier_init(self.output_proj, distribution='uniform', bias=0.)\r\n self._is_init = True\r\n\r\n def forward(self,\r\n query,\r\n key=None,\r\n value=None,\r\n identity=None,\r\n query_pos=None,\r\n key_padding_mask=None,\r\n reference_points=None,\r\n spatial_shapes=None,\r\n level_start_index=None,\r\n **kwargs):\r\n \"\"\"Forward Function of MultiScaleDeformAttention.\r\n Args:\r\n query (Tensor): Query of Transformer with shape\r\n ( bs, num_query, embed_dims).\r\n key (Tensor): The key tensor with shape\r\n `(bs, num_key, embed_dims)`.\r\n value (Tensor): The value tensor with shape\r\n `(bs, num_key, embed_dims)`.\r\n identity (Tensor): The tensor used for addition, with the\r\n same shape as `query`. Default None. If None,\r\n `query` will be used.\r\n query_pos (Tensor): The positional encoding for `query`.\r\n Default: None.\r\n key_pos (Tensor): The positional encoding for `key`. Default\r\n None.\r\n reference_points (Tensor): The normalized reference\r\n points with shape (bs, num_query, num_levels, 2),\r\n all elements is range in [0, 1], top-left (0,0),\r\n bottom-right (1, 1), including padding area.\r\n or (N, Length_{query}, num_levels, 4), add\r\n additional two dimensions is (w, h) to\r\n form reference boxes.\r\n key_padding_mask (Tensor): ByteTensor for `query`, with\r\n shape [bs, num_key].\r\n spatial_shapes (Tensor): Spatial shape of features in\r\n different levels. 
With shape (num_levels, 2),\r\n last dimension represents (h, w).\r\n level_start_index (Tensor): The start index of each level.\r\n A tensor has shape ``(num_levels, )`` and can be represented\r\n as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].\r\n Returns:\r\n Tensor: forwarded results with shape [num_query, bs, embed_dims].\r\n \"\"\"\r\n\r\n if value is None:\r\n value = query\r\n if identity is None:\r\n identity = query\r\n if query_pos is not None:\r\n query = query + query_pos\r\n\r\n if not self.batch_first:\r\n # change to (bs, num_query ,embed_dims)\r\n query = query.permute(1, 0, 2)\r\n value = value.permute(1, 0, 2)\r\n\r\n bs, num_query, _ = query.shape\r\n bs, num_value, _ = value.shape\r\n assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value\r\n\r\n value = self.value_proj(value)\r\n if key_padding_mask is not None:\r\n value = value.masked_fill(key_padding_mask[..., None], 0.0)\r\n value = value.view(bs, num_value, self.num_heads, -1)\r\n sampling_offsets = self.sampling_offsets(query).view(\r\n bs, num_query, self.num_heads, self.num_levels, self.num_points, 2)\r\n attention_weights = self.attention_weights(query).view(\r\n bs, num_query, self.num_heads, self.num_levels * self.num_points)\r\n\r\n attention_weights = attention_weights.softmax(-1)\r\n\r\n attention_weights = attention_weights.view(bs, num_query,\r\n self.num_heads,\r\n self.num_levels,\r\n self.num_points)\r\n\r\n if reference_points.shape[-1] == 2:\r\n \"\"\"\r\n For each BEV query, it owns `num_Z_anchors` in 3D space that having different heights.\r\n After proejcting, each BEV query has `num_Z_anchors` reference points in each 2D image.\r\n For each referent point, we sample `num_points` sampling points.\r\n For `num_Z_anchors` reference points, it has overall `num_points * num_Z_anchors` sampling points.\r\n \"\"\"\r\n offset_normalizer = torch.stack(\r\n [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)\r\n\r\n bs, num_query, num_Z_anchors, xy = reference_points.shape\r\n reference_points = reference_points[:, :, None, None, None, :, :]\r\n sampling_offsets = sampling_offsets / \\\r\n offset_normalizer[None, None, None, :, None, :]\r\n bs, num_query, num_heads, num_levels, num_all_points, xy = sampling_offsets.shape\r\n sampling_offsets = sampling_offsets.view(\r\n bs, num_query, num_heads, num_levels, num_all_points // num_Z_anchors, num_Z_anchors, xy)\r\n sampling_locations = reference_points + sampling_offsets\r\n bs, num_query, num_heads, num_levels, num_points, num_Z_anchors, xy = sampling_locations.shape\r\n assert num_all_points == num_points * num_Z_anchors\r\n\r\n sampling_locations = sampling_locations.view(\r\n bs, num_query, num_heads, num_levels, num_all_points, xy)\r\n\r\n elif reference_points.shape[-1] == 4:\r\n assert False\r\n else:\r\n raise ValueError(\r\n f'Last dim of reference_points must be'\r\n f' 2 or 4, but get {reference_points.shape[-1]} instead.')\r\n\r\n # sampling_locations.shape: bs, num_query, num_heads, num_levels, num_all_points, 2\r\n # attention_weights.shape: bs, num_query, num_heads, num_levels, num_all_points\r\n #\r\n\r\n if torch.cuda.is_available() and value.is_cuda:\r\n if value.dtype == torch.float16:\r\n MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32\r\n else:\r\n MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32\r\n output = MultiScaleDeformableAttnFunction.apply(\r\n value, spatial_shapes, level_start_index, sampling_locations,\r\n attention_weights, self.im2col_step)\r\n else:\r\n output = 
multi_scale_deformable_attn_pytorch(\r\n value, spatial_shapes, sampling_locations, attention_weights)\r\n if not self.batch_first:\r\n output = output.permute(1, 0, 2)\r\n\r\n return output\r" }, { "identifier": "CustomMSDeformableAttention", "path": "projects/bevformer/modules/decoder.py", "snippet": "class CustomMSDeformableAttention(BaseModule):\r\n \"\"\"An attention module used in Deformable-Detr.\r\n\r\n `Deformable DETR: Deformable Transformers for End-to-End Object Detection.\r\n <https://arxiv.org/pdf/2010.04159.pdf>`_.\r\n\r\n Args:\r\n embed_dims (int): The embedding dimension of Attention.\r\n Default: 256.\r\n num_heads (int): Parallel attention heads. Default: 64.\r\n num_levels (int): The number of feature map used in\r\n Attention. Default: 4.\r\n num_points (int): The number of sampling points for\r\n each query in each head. Default: 4.\r\n im2col_step (int): The step used in image_to_column.\r\n Default: 64.\r\n dropout (float): A Dropout layer on `inp_identity`.\r\n Default: 0.1.\r\n batch_first (bool): Key, Query and Value are shape of\r\n (batch, n, embed_dim)\r\n or (n, batch, embed_dim). Default to False.\r\n norm_cfg (dict): Config dict for normalization layer.\r\n Default: None.\r\n init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.\r\n Default: None.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n embed_dims=256,\r\n num_heads=8,\r\n num_levels=4,\r\n num_points=4,\r\n im2col_step=64,\r\n dropout=0.1,\r\n batch_first=False,\r\n norm_cfg=None,\r\n init_cfg=None):\r\n super().__init__(init_cfg)\r\n if embed_dims % num_heads != 0:\r\n raise ValueError(f'embed_dims must be divisible by num_heads, '\r\n f'but got {embed_dims} and {num_heads}')\r\n dim_per_head = embed_dims // num_heads\r\n self.norm_cfg = norm_cfg\r\n self.dropout = nn.Dropout(dropout)\r\n self.batch_first = batch_first\r\n self.fp16_enabled = False\r\n\r\n # you'd better set dim_per_head to a power of 2\r\n # which is more efficient in the CUDA implementation\r\n def _is_power_of_2(n):\r\n if (not isinstance(n, int)) or (n < 0):\r\n raise ValueError(\r\n 'invalid input for _is_power_of_2: {} (type: {})'.format(\r\n n, type(n)))\r\n return (n & (n - 1) == 0) and n != 0\r\n\r\n if not _is_power_of_2(dim_per_head):\r\n warnings.warn(\r\n \"You'd better set embed_dims in \"\r\n 'MultiScaleDeformAttention to make '\r\n 'the dimension of each attention head a power of 2 '\r\n 'which is more efficient in our CUDA implementation.')\r\n\r\n self.im2col_step = im2col_step\r\n self.embed_dims = embed_dims\r\n self.num_levels = num_levels\r\n self.num_heads = num_heads\r\n self.num_points = num_points\r\n self.sampling_offsets = nn.Linear(\r\n embed_dims, num_heads * num_levels * num_points * 2)\r\n self.attention_weights = nn.Linear(embed_dims,\r\n num_heads * num_levels * num_points)\r\n self.value_proj = nn.Linear(embed_dims, embed_dims)\r\n self.output_proj = nn.Linear(embed_dims, embed_dims)\r\n self.init_weights()\r\n\r\n def init_weights(self):\r\n \"\"\"Default initialization for Parameters of Module.\"\"\"\r\n constant_init(self.sampling_offsets, 0.)\r\n thetas = torch.arange(\r\n self.num_heads,\r\n dtype=torch.float32) * (2.0 * math.pi / self.num_heads)\r\n grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)\r\n grid_init = (grid_init /\r\n grid_init.abs().max(-1, keepdim=True)[0]).view(\r\n self.num_heads, 1, 1,\r\n 2).repeat(1, self.num_levels, self.num_points, 1)\r\n for i in range(self.num_points):\r\n grid_init[:, :, i, :] *= i + 1\r\n\r\n self.sampling_offsets.bias.data = 
grid_init.view(-1)\r\n constant_init(self.attention_weights, val=0., bias=0.)\r\n xavier_init(self.value_proj, distribution='uniform', bias=0.)\r\n xavier_init(self.output_proj, distribution='uniform', bias=0.)\r\n self._is_init = True\r\n\r\n @deprecated_api_warning({'residual': 'identity'},\r\n cls_name='MultiScaleDeformableAttention')\r\n def forward(self,\r\n query,\r\n key=None,\r\n value=None,\r\n identity=None,\r\n query_pos=None,\r\n key_padding_mask=None,\r\n reference_points=None,\r\n spatial_shapes=None,\r\n level_start_index=None,\r\n flag='decoder',\r\n **kwargs):\r\n \"\"\"Forward Function of MultiScaleDeformAttention.\r\n\r\n Args:\r\n query (Tensor): Query of Transformer with shape\r\n (num_query, bs, embed_dims).\r\n key (Tensor): The key tensor with shape\r\n `(num_key, bs, embed_dims)`.\r\n value (Tensor): The value tensor with shape\r\n `(num_key, bs, embed_dims)`.\r\n identity (Tensor): The tensor used for addition, with the\r\n same shape as `query`. Default None. If None,\r\n `query` will be used.\r\n query_pos (Tensor): The positional encoding for `query`.\r\n Default: None.\r\n key_pos (Tensor): The positional encoding for `key`. Default\r\n None.\r\n reference_points (Tensor): The normalized reference\r\n points with shape (bs, num_query, num_levels, 2),\r\n all elements is range in [0, 1], top-left (0,0),\r\n bottom-right (1, 1), including padding area.\r\n or (N, Length_{query}, num_levels, 4), add\r\n additional two dimensions is (w, h) to\r\n form reference boxes.\r\n key_padding_mask (Tensor): ByteTensor for `query`, with\r\n shape [bs, num_key].\r\n spatial_shapes (Tensor): Spatial shape of features in\r\n different levels. With shape (num_levels, 2),\r\n last dimension represents (h, w).\r\n level_start_index (Tensor): The start index of each level.\r\n A tensor has shape ``(num_levels, )`` and can be represented\r\n as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].\r\n\r\n Returns:\r\n Tensor: forwarded results with shape [num_query, bs, embed_dims].\r\n \"\"\"\r\n\r\n if value is None:\r\n value = query\r\n\r\n if identity is None:\r\n identity = query\r\n if query_pos is not None:\r\n query = query + query_pos\r\n if not self.batch_first:\r\n # change to (bs, num_query ,embed_dims)\r\n query = query.permute(1, 0, 2)\r\n value = value.permute(1, 0, 2)\r\n\r\n bs, num_query, _ = query.shape\r\n bs, num_value, _ = value.shape\r\n assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value\r\n\r\n value = self.value_proj(value)\r\n if key_padding_mask is not None:\r\n value = value.masked_fill(key_padding_mask[..., None], 0.0)\r\n value = value.view(bs, num_value, self.num_heads, -1)\r\n\r\n sampling_offsets = self.sampling_offsets(query).view(\r\n bs, num_query, self.num_heads, self.num_levels, self.num_points, 2)\r\n attention_weights = self.attention_weights(query).view(\r\n bs, num_query, self.num_heads, self.num_levels * self.num_points)\r\n attention_weights = attention_weights.softmax(-1)\r\n\r\n attention_weights = attention_weights.view(bs, num_query,\r\n self.num_heads,\r\n self.num_levels,\r\n self.num_points)\r\n if reference_points.shape[-1] == 2:\r\n offset_normalizer = torch.stack(\r\n [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)\r\n sampling_locations = reference_points[:, :, None, :, None, :] \\\r\n + sampling_offsets \\\r\n / offset_normalizer[None, None, None, :, None, :]\r\n elif reference_points.shape[-1] == 4:\r\n sampling_locations = reference_points[:, :, None, :, None, :2] \\\r\n + sampling_offsets / self.num_points \\\r\n 
* reference_points[:, :, None, :, None, 2:] \\\r\n * 0.5\r\n else:\r\n raise ValueError(\r\n f'Last dim of reference_points must be'\r\n f' 2 or 4, but get {reference_points.shape[-1]} instead.')\r\n if torch.cuda.is_available() and value.is_cuda:\r\n\r\n # using fp16 deformable attention is unstable because it performs many sum operations\r\n if value.dtype == torch.float16:\r\n MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32\r\n else:\r\n MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32\r\n output = MultiScaleDeformableAttnFunction.apply(\r\n value, spatial_shapes, level_start_index, sampling_locations,\r\n attention_weights, self.im2col_step)\r\n else:\r\n output = multi_scale_deformable_attn_pytorch(\r\n value, spatial_shapes, sampling_locations, attention_weights)\r\n\r\n output = self.output_proj(output)\r\n\r\n if not self.batch_first:\r\n # (num_query, bs ,embed_dims)\r\n output = output.permute(1, 0, 2)\r\n\r\n return self.dropout(output) + identity\r" } ]
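All three attention snippets in this row share the same 2-D branch for turning learned offsets into normalized sampling locations. The toy-shape sketch below isolates that computation; every size is made up for illustration.

# Toy-shape sketch of the sampling-location computation shared by the
# deformable-attention snippets above (the reference_points.shape[-1] == 2 branch).
import torch

bs, num_query, num_heads, num_levels, num_points = 2, 4, 8, 3, 4

spatial_shapes = torch.tensor([[64, 96], [32, 48], [16, 24]])        # (h, w) per feature level
reference_points = torch.rand(bs, num_query, num_levels, 2)          # normalized to [0, 1]
sampling_offsets = torch.randn(bs, num_query, num_heads, num_levels, num_points, 2)

# Offsets are divided by (w, h), turning pixel offsets into fractions of each level's size
offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
sampling_locations = (
    reference_points[:, :, None, :, None, :]
    + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
)
print(sampling_locations.shape)  # (bs, num_query, num_heads, num_levels, num_points, 2)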
import numpy as np
import torch
import torch.nn as nn
from torch.nn.init import normal_
from torchvision.transforms.functional import rotate
from mmcv.cnn import xavier_init
from mmcv.cnn.bricks.transformer import build_transformer_layer_sequence, build_positional_encoding
from mmcv.runner.base_module import BaseModule
from ...utils.builder import BEV_CONSTRUCTOR
from projects.bevformer.modules.temporal_self_attention import TemporalSelfAttention
from projects.bevformer.modules.spatial_cross_attention import MSDeformableAttention3D
from projects.bevformer.modules.decoder import CustomMSDeformableAttention
8,461
#---------------------------------------------------------------------------------------# # LaneSegNet: Map Learning with Lane Segment Perception for Autonomous Driving # # Source code: https://github.com/OpenDriveLab/LaneSegNet # # Copyright (c) OpenDriveLab. All rights reserved. # #---------------------------------------------------------------------------------------# @BEV_CONSTRUCTOR.register_module() class BEVFormerConstructer(BaseModule): """Implements the BEVFormer BEV Constructer. Args: as_two_stage (bool): Generate query from encoder features. Default: False. num_feature_levels (int): Number of feature maps from FPN: Default: 4. two_stage_num_proposals (int): Number of proposals when set `as_two_stage` as True. Default: 300. """ def __init__(self, num_feature_levels=4, num_cams=6, embed_dims=256, rotate_prev_bev=True, use_shift=True, use_can_bus=True, can_bus_norm=True, use_cams_embeds=True, pc_range=[-51.2, -51.2, -5.0, 51.2, 51.2, 3.0], bev_h=200, bev_w=200, rotate_center=[100, 100], encoder=None, positional_encoding=None, **kwargs): super(BEVFormerConstructer, self).__init__(**kwargs) self.embed_dims = embed_dims self.num_feature_levels = num_feature_levels self.num_cams = num_cams self.fp16_enabled = False self.rotate_prev_bev = rotate_prev_bev self.use_shift = use_shift self.use_can_bus = use_can_bus self.can_bus_norm = can_bus_norm self.use_cams_embeds = use_cams_embeds self.encoder = build_transformer_layer_sequence(encoder) self.positional_encoding = build_positional_encoding(positional_encoding) self.pc_range = pc_range self.real_w = self.pc_range[3] - self.pc_range[0] self.real_h = self.pc_range[4] - self.pc_range[1] self.bev_h = bev_h self.bev_w = bev_w self.rotate_center = rotate_center self.init_layers() def init_layers(self): self.bev_embedding = nn.Embedding( self.bev_h * self.bev_w, self.embed_dims) self.level_embeds = nn.Parameter(torch.Tensor( self.num_feature_levels, self.embed_dims)) self.cams_embeds = nn.Parameter( torch.Tensor(self.num_cams, self.embed_dims)) self.can_bus_mlp = nn.Sequential( nn.Linear(18, self.embed_dims // 2), nn.ReLU(inplace=True), nn.Linear(self.embed_dims // 2, self.embed_dims), nn.ReLU(inplace=True), ) if self.can_bus_norm: self.can_bus_mlp.add_module('norm', nn.LayerNorm(self.embed_dims)) def init_weights(self): """Initialize the transformer weights.""" for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) for m in self.modules(): if isinstance(m, MSDeformableAttention3D) or isinstance(m, TemporalSelfAttention) \
#---------------------------------------------------------------------------------------# # LaneSegNet: Map Learning with Lane Segment Perception for Autonomous Driving # # Source code: https://github.com/OpenDriveLab/LaneSegNet # # Copyright (c) OpenDriveLab. All rights reserved. # #---------------------------------------------------------------------------------------# @BEV_CONSTRUCTOR.register_module() class BEVFormerConstructer(BaseModule): """Implements the BEVFormer BEV Constructer. Args: as_two_stage (bool): Generate query from encoder features. Default: False. num_feature_levels (int): Number of feature maps from FPN: Default: 4. two_stage_num_proposals (int): Number of proposals when set `as_two_stage` as True. Default: 300. """ def __init__(self, num_feature_levels=4, num_cams=6, embed_dims=256, rotate_prev_bev=True, use_shift=True, use_can_bus=True, can_bus_norm=True, use_cams_embeds=True, pc_range=[-51.2, -51.2, -5.0, 51.2, 51.2, 3.0], bev_h=200, bev_w=200, rotate_center=[100, 100], encoder=None, positional_encoding=None, **kwargs): super(BEVFormerConstructer, self).__init__(**kwargs) self.embed_dims = embed_dims self.num_feature_levels = num_feature_levels self.num_cams = num_cams self.fp16_enabled = False self.rotate_prev_bev = rotate_prev_bev self.use_shift = use_shift self.use_can_bus = use_can_bus self.can_bus_norm = can_bus_norm self.use_cams_embeds = use_cams_embeds self.encoder = build_transformer_layer_sequence(encoder) self.positional_encoding = build_positional_encoding(positional_encoding) self.pc_range = pc_range self.real_w = self.pc_range[3] - self.pc_range[0] self.real_h = self.pc_range[4] - self.pc_range[1] self.bev_h = bev_h self.bev_w = bev_w self.rotate_center = rotate_center self.init_layers() def init_layers(self): self.bev_embedding = nn.Embedding( self.bev_h * self.bev_w, self.embed_dims) self.level_embeds = nn.Parameter(torch.Tensor( self.num_feature_levels, self.embed_dims)) self.cams_embeds = nn.Parameter( torch.Tensor(self.num_cams, self.embed_dims)) self.can_bus_mlp = nn.Sequential( nn.Linear(18, self.embed_dims // 2), nn.ReLU(inplace=True), nn.Linear(self.embed_dims // 2, self.embed_dims), nn.ReLU(inplace=True), ) if self.can_bus_norm: self.can_bus_mlp.add_module('norm', nn.LayerNorm(self.embed_dims)) def init_weights(self): """Initialize the transformer weights.""" for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) for m in self.modules(): if isinstance(m, MSDeformableAttention3D) or isinstance(m, TemporalSelfAttention) \
or isinstance(m, CustomMSDeformableAttention):
3
2023-12-06 07:13:48+00:00
12k
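The row above registers BEVFormerConstructer into the BEV_CONSTRUCTOR registry shown in its context. The self-contained sketch below only demonstrates that registry pattern with a tiny stand-in module; it is not the project's real constructor or configuration.

# Sketch of the mmcv Registry pattern behind BEV_CONSTRUCTOR: register a class
# by decorator, then build it from a config dict whose 'type' names the class.
# TinyBEVConstructor is a made-up stand-in, not the real BEVFormerConstructer.
import torch.nn as nn
from mmcv.utils import Registry

DEMO_CONSTRUCTOR = Registry('demo BEV constructor')

@DEMO_CONSTRUCTOR.register_module()
class TinyBEVConstructor(nn.Module):
    def __init__(self, bev_h=200, bev_w=200, embed_dims=256):
        super().__init__()
        self.bev_embedding = nn.Embedding(bev_h * bev_w, embed_dims)

    def forward(self):
        # Learnable BEV queries, shape (bev_h * bev_w, embed_dims)
        return self.bev_embedding.weight

cfg = dict(type='TinyBEVConstructor', bev_h=50, bev_w=50, embed_dims=32)
bev_constructor = DEMO_CONSTRUCTOR.build(cfg)
print(bev_constructor().shape)  # torch.Size([2500, 32])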
RobertCsordas/moe_attention
tasks/simple/language_model/enwik8_transformer.py
[ { "identifier": "TransformerLanguageModel", "path": "models/transformer_language_model.py", "snippet": "class TransformerLanguageModel(LoggingLayer, torch.nn.Module):\n def __init__(self, voc_size: int, embedding_size: Optional[int], state_size: int, dropout: float,\n tied_embedding: bool, layers: List[torch.nn.Module], n_prev_states: int,\n n_prev_states_test: Optional[int] = None, adaptive_cutoffs: List[int] = [],\n same_length_eval: bool = True, norm_before_output: bool = False,\n p_drop_layer: float = 0.0, use_last_state: bool = False, same_length: bool = False):\n\n super().__init__()\n\n self.embedding = torch.nn.Embedding(voc_size, embedding_size or state_size)\n torch.nn.init.kaiming_normal_(self.embedding.weight, mode=\"fan_in\", nonlinearity=\"linear\")\n\n self.shared_layers = all([la is layers[0] for la in layers])\n\n if embedding_size is None:\n self.embedding_adapter = lambda x: x\n else:\n self.embedding_adapter = torch.nn.Linear(embedding_size, state_size)\n\n self.dropout = torch.nn.Dropout(dropout)\n self.layers = layers\n self.unique_layers = torch.nn.ModuleList(unique_obejcts(layers))\n self.output_adapter = lambda x: x\n self.n_prev_states = n_prev_states\n self.n_prev_states_test = n_prev_states_test or n_prev_states\n self.same_length_eval = same_length_eval\n self.embedding_scale = math.sqrt(state_size)\n self.p_drop_layer = p_drop_layer\n self.use_last_state = use_last_state\n self.same_length = same_length\n self.iter = 0\n\n self.adaptive = bool(adaptive_cutoffs)\n\n out_proj_size = (embedding_size or state_size) if tied_embedding else state_size\n if self.adaptive:\n self.output = framework.layers.CustomAdaptiveLogSoftmaxWithLoss(\n out_proj_size, voc_size, adaptive_cutoffs, div_value=1,\n tied_to=self.embedding if tied_embedding else None)\n else:\n self.output = torch.nn.Linear(out_proj_size, voc_size)\n\n if norm_before_output:\n self.out_norm = torch.nn.LayerNorm(state_size)\n else:\n self.out_norm = lambda x: x\n\n if tied_embedding:\n if not self.adaptive:\n self.output.weight = self.embedding.weight\n if embedding_size is not None:\n self.output_adapter = torch.nn.Linear(state_size, embedding_size)\n\n @staticmethod\n def generate_history_mask(sz: int, device: torch.device) -> torch.Tensor:\n return torch.tril(torch.ones(sz, sz, dtype=torch.bool, device=device), diagonal=-1)\n\n def gen_output(self, x: torch.Tensor, target: Optional[torch.Tensor]) -> torch.Tensor:\n net = self.out_norm(x)\n net = self.output_adapter(net)\n net = self.dropout(net)\n\n if self.adaptive:\n net = self.output(net.transpose(0, 1), target)\n else:\n net = self.output(net.transpose(0, 1))\n\n return net\n\n def forward(self, x: torch.Tensor, target: Optional[torch.Tensor], state) -> Tuple[torch.Tensor, Any]:\n causality_mask = Transformer.generate_square_subsequent_mask(x.shape[0], x.device)\n\n net = self.dropout(self.embedding(x.T.long()))\n net = self.embedding_adapter(net)\n net = net * self.embedding_scale\n\n new_state = []\n features = [net]\n\n n_prev_states = self.n_prev_states if self.training else self.n_prev_states_test\n\n same_length = self.same_length or ((not self.training) and self.same_length_eval)\n if same_length and state is not None:\n causality_mask = [self.generate_history_mask(x.shape[0], x.device)] + \\\n [torch.zeros_like(causality_mask)] * (len(state[0]) - 1) + [causality_mask]\n causality_mask = torch.cat(causality_mask, -1)\n\n\n plot_cossim = (self.iter % 100 == 0 and self.training)\n for li, l in enumerate(self.layers):\n if n_prev_states > 0:\n 
if li == 0:\n # Pos offset should be constant for all layers\n pos_offset = sum(s.shape[1] for s in state[0]) if state is not None else 0\n\n # Concatenate the new state with the previous states\n li_r = -1 if self.use_last_state else li\n s = (state[li_r] + [net]) if state is not None else [net]\n attend_to = torch.cat(s, 1)\n\n if not self.use_last_state:\n s[-1] = s[-1].detach()\n new_state.append(s[-n_prev_states:])\n else:\n pos_offset = None\n attend_to = None\n\n net_o = l(net, mask=AttentionMask(None, causality_mask), attend_to=attend_to,\n pos_offset=pos_offset)\n\n if plot_cossim:\n features.append(net_o)\n\n with torch.no_grad():\n ndiff = torch.norm(net_o - net, p=2, dim=-1)\n n_in = torch.norm(net, p=2, dim=-1)\n self.log(f\"activation_norm/abs_update_layer_{li}\", ndiff.mean())\n self.log(f\"activation_norm/in_layer_{li}\", n_in.mean())\n self.log(f\"activation_norm/rel_update_layer_{li}\", (ndiff/n_in.clamp(min=torch.finfo(n_in.dtype).eps)).mean())\n\n if self.training and self.p_drop_layer > 0.0:\n net = torch.where(torch.rand_like(net_o[..., 0:1]) < self.p_drop_layer, net, net_o)\n else:\n net = net_o\n\n if self.use_last_state and n_prev_states > 0:\n # If we carry over the last state, save it here\n new_state = [((state[0] if state is not None else []) + [net.detach()])[-n_prev_states:]]\n\n if plot_cossim:\n with torch.no_grad():\n f_sample = [f.view(-1, f.shape[-1])[:1024] for f in features]\n f_sample_all = torch.stack(f_sample, -2)\n scores = framework.utils.cossim(f_sample_all, f_sample_all).mean(0)\n self.log(\"feature_cossim\", framework.visualize.plot.Heatmap(scores, range=(0, 1), textval=False))\n\n outs = F.softmax(self.gen_output(f_sample_all, target).transpose(0, 1), -1)\n scores = framework.utils.cossim(outs, outs).mean(0)\n self.log(\"out_dist_cossim\", framework.visualize.plot.Heatmap(scores, range=(0, 1), textval=False))\n\n real_out = outs[:, -1]\n for i in range(outs.shape[-2] - 1):\n self.log(f\"out_diff_{i}\", (outs[:, i] - real_out).norm(dim=-1, p=1).mean())\n\n del outs\n\n\n del features\n\n net = self.gen_output(net, target)\n self.iter += 1\n\n return net, new_state" }, { "identifier": "task", "path": "tasks/task_db.py", "snippet": "def task(name: Optional[str] = None):\n def wrapper(cls):\n n = TASK_PREFIX + (name or camel_to_snake(cls.__name__))\n assert n not in TASKS, f\"Task {n} already exists\"\n TASKS[n] = cls\n return cls\n return wrapper" }, { "identifier": "args", "path": "tasks/task_db.py", "snippet": "def args(fn):\n global ARGS_REGISTERS\n ARGS_REGISTERS.append(fn)\n return fn" }, { "identifier": "TransformerLMMixin", "path": "tasks/simple/language_model/transformer_lm_mixin.py", "snippet": "class TransformerLMMixin:\n helper: framework.helpers.TrainingHelper\n\n def is_preln(self) -> bool:\n return \"preln\" in self.helper.args.transformer.variant\n\n def topk_activation(self, x: torch.Tensor) -> torch.Tensor:\n nx = -x\n return torch.masked_fill(x, nx <= nx.kthvalue(self.helper.args.transformer.topk_value, keepdim=True)[0], 0)\n\n def get_layers(self) -> List[torch.nn.Module]:\n # pyright: reportOptionalMemberAccess=false\n if self.helper.args.transformer.activation == \"relu\":\n activation = F.relu\n elif self.helper.args.transformer.activation == \"topk\":\n activation = self.topk_activation\n elif self.helper.args.transformer.activation == \"identity\":\n activation = lambda x: x\n elif self.helper.args.transformer.activation == \"sigmoid\":\n activation = torch.sigmoid\n elif self.helper.args.transformer.activation == 
\"gelu\":\n activation = F.gelu\n elif self.helper.args.transformer.activation == \"softmax\":\n activation = lambda x: F.softmax(x, dim=-1)\n else:\n raise ValueError(f\"Invalid activation: {self.helper.args.transformer.activation}\")\n\n base_args = dict(\n d_model=self.helper.args.state_size,\n nhead=self.helper.args.transformer.n_heads,\n dropout=self.helper.args.dropout,\n activation=activation\n )\n\n if self.helper.args.transformer.variant not in {\"preln_moe\", \"moe\"}:\n base_args[\"dim_feedforward\"]=int(self.helper.args.state_size * self.helper.args.transformer.ff_multiplier)\n\n\n extra_args = {} if not self.helper.args.transformer.variant.endswith(\"_gelu\") else {\n \"activation\": F.gelu,\n \"drop_expand\": False\n }\n\n\n if self.helper.args.transformer.variant in {\"preln_relative\"}:\n mklayer = lambda: PrelnRelativeTransformerEncoderLayer(\n **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp,\n n_layers=self.helper.args.transformer.encoder_n_layers,\n head_projection_size=self.helper.args.transformer.head_projection_size,)\n elif self.helper.args.transformer.variant in {\"preln_moeatt\"}:\n mklayer = lambda: MoeAttentionRelativeTransformerEncoderLayer(\n **base_args, **extra_args, moe_att_n_experts=self.helper.args.moe.att.n_experts,\n n_layers=self.helper.args.transformer.encoder_n_layers,\n head_projection_size=self.helper.args.transformer.head_projection_size,\n att_perplexity_reg=self.helper.args.moe.perplexity_reg if self.helper.args.moe.att.perplexity_reg is None else self.helper.args.moe.att.perplexity_reg,\n expert_dropout=self.helper.args.moe.drop_expert if self.helper.args.moe.att.drop_expert is None else self.helper.args.moe.att.drop_expert,\n att_selection_mode=self.helper.args.moe.att.selection_mode,\n preln=self.is_preln(),\n attention_variant=self.helper.args.moe.att.variant,\n q_expert=self.helper.args.moe.att.q_expert,\n k_expert=self.helper.args.moe.att.k_expert,\n v_expert=self.helper.args.moe.att.v_expert,\n o_expert=self.helper.args.moe.att.o_expert,\n norm_qk_score=self.helper.args.moe.att.norm_qk,\n v_projection_size=self.helper.args.moe.att.v_size,\n same_sel=self.helper.args.moe.att.same_sel,\n moe_k=self.helper.args.moe.att.k,\n qside_n_experts=self.helper.args.moe.att.qside_n_experts,\n shared_experts=self.helper.args.moe.att.shared_experts,\n kq_n_experts=self.helper.args.moe.att.kq_n_experts,\n separate_kq_sel=self.helper.args.moe.att.separate_kq_sel,\n moa_mode=self.helper.args.moa.mode,\n cvloss=self.helper.args.moa.cvloss,\n switchloss=self.helper.args.moa.switchloss,\n zloss=self.helper.args.moa.zloss,\n rotate_fraction=self.helper.args.rope.rotate_fraction,\n rope_base=self.helper.args.rope.base,\n moeatt_norm_init=self.helper.args.moe.att.norm_init)\n elif self.helper.args.transformer.variant in {\"preln_rope\", \"rope\"}:\n mklayer = lambda: FastRopeTransformerEncoderLayer(\n **base_args, **extra_args,\n n_layers=self.helper.args.transformer.encoder_n_layers,\n head_projection_size=self.helper.args.transformer.head_projection_size,\n preln=self.is_preln(), rotate_fraction = self.helper.args.rope.rotate_fraction,\n rope_base=self.helper.args.rope.base)\n elif self.helper.args.transformer.variant in {\"preln_moe\", \"moe\"}:\n # def __init__(self, d_model, nhead, n_bins: int, bin_size: int, n_layers: int, dim_feedforward=2048,\n mklayer = lambda: RelativeMoeTransformerEncoderLayer(\n **base_args, **extra_args, preln=self.is_preln(),\n test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp,\n 
n_layers=self.helper.args.transformer.encoder_n_layers,\n n_experts=self.helper.args.moe.n_experts,\n expert_size=self.helper.args.moe.expert_size,\n dropout_mode=self.helper.args.kvmem.dropout,\n selection_mode=self.helper.args.moe.selection_mode,\n perplexity_reg=self.helper.args.moe.perplexity_reg,\n n_heads=self.helper.args.pkm.n_heads,\n norm_keys=self.helper.args.moe.norm_keys,\n perplexity_reg_mode=self.helper.args.moe.perplexity_reg_mode,\n n_random=self.helper.args.moe.n_random,\n reg_type=self.helper.args.moe.reg_type,\n topk_mode=self.helper.args.moe.topk_mode,\n head_projection_size=self.helper.args.transformer.head_projection_size,\n activation_after_topk=self.helper.args.moe.activation_after_topk,\n drop_parallel=self.helper.args.moe.drop_parallel,\n norm_key_init=self.helper.args.moe.norm_key_init,\n norm_value_init=self.helper.args.moe.norm_value_init,\n normalize_expert_sel_init=self.helper.args.moe.norm_expert_sel_init,\n identical_init=self.helper.args.moe.identical_init,\n sel_norm=self.helper.args.moe.sel_norm,\n ln_affine=self.helper.args.transformer.ln_affine,\n moe_dropout_factor=self.helper.args.moe.dropout_factor,\n drop_expert=self.helper.args.moe.drop_expert,\n sync_distributed=self.helper.args.moe.sync_distributed,\n modulation_amplitude=self.helper.args.moe.modulation_amplitude,\n moe_init_scale=self.helper.args.moe.init_scale,\n moe_attention=self.helper.args.moe.att.enable,\n moe_att_n_experts=self.helper.args.moe.att.n_experts,\n moe_att_expert_dropout=self.helper.args.moe.drop_expert if self.helper.args.moe.att.drop_expert is None else self.helper.args.moe.att.drop_expert,\n moe_att_selection_mode=self.helper.args.moe.att.selection_mode,\n moe_att_variant=self.helper.args.moe.att.variant,\n moe_att_ppl_reg=self.helper.args.moe.perplexity_reg if self.helper.args.moe.att.perplexity_reg is None else self.helper.args.moe.att.perplexity_reg,\n moe_att_k=self.helper.args.moe.att.k,\n q_expert=self.helper.args.moe.att.q_expert,\n k_expert=self.helper.args.moe.att.k_expert,\n v_expert=self.helper.args.moe.att.v_expert,\n o_expert=self.helper.args.moe.att.o_expert,\n v_projection_size=self.helper.args.moe.att.v_size,\n qside_n_experts=self.helper.args.moe.att.qside_n_experts,\n moe_att_shared_experts=self.helper.args.moe.att.shared_experts,\n moe_att_kq_n_experts=self.helper.args.moe.att.kq_n_experts,\n moe_att_separate_kq_sel=self.helper.args.moe.att.separate_kq_sel,\n rotate_fraction=self.helper.args.rope.rotate_fraction,\n rope_base=self.helper.args.rope.base,\n moe_att_norm_init=self.helper.args.moe.att.norm_init,\n moe_att_same_sel=self.helper.args.moe.att.same_sel,\n moe_att_norm_retrieval=self.helper.args.moe.att.norm_ret)\n else:\n assert False, f\"Invalid variant \\\"{self.helper.args.transformer.variant}\\\"\"\n\n layers = [mklayer() for _ in range(self.helper.args.transformer.encoder_n_layers)]\n return layers\n\n\n def fix_init(self, model):\n init_std = 0.02\n\n torch.nn.init.normal_(model.embedding.weight, 0.0, init_std)\n # torch.nn.init.normal_(model.embedding_adapter.weight, 0.0, init_std)\n\n initialized = 0\n for m in model.modules():\n if isinstance(m, (torch.nn.Linear, torch.nn.Embedding)) and hasattr(m, \"weight\"):\n torch.nn.init.normal_(m.weight, 0.0, init_std)\n initialized += m.weight.numel()\n if isinstance(m, (torch.nn.Linear, torch.nn.LayerNorm)) and m.bias is not None:\n torch.nn.init.constant_(m.bias, 0)\n initialized += m.bias.numel()\n if isinstance(m, (torch.nn.LayerNorm)) and m.weight is not None:\n 
torch.nn.init.normal_(m.weight, 1.0, init_std)\n initialized += m.weight.numel()\n if isinstance(m, MoE):\n torch.nn.init.normal_(m.keys, 0.0, init_std)\n torch.nn.init.normal_(m.values, 0.0, init_std)\n if m.expert_sel is not None:\n torch.nn.init.normal_(m.expert_sel, 0.0, init_std)\n m.renorm_keep_std(m.expert_sel)\n initialized += m.expert_sel.numel()\n initialized += m.keys.numel() + m.values.numel()\n if isinstance(m, (FullMoeRelativeAttentionCore)):\n for p in m.parameters():\n torch.nn.init.normal_(p, 0.0, init_std)\n initialized += p.numel()\n\n for s in m.selections.values():\n m.renorm_keep_std(s)\n\n print(f\"Reinitialized {initialized/self.n_weights*100:.3f}% weights\")\n\n def create_model(self) -> torch.nn.Module:\n self.validation_started_on = None\n # pyright: reportOptionalMemberAccess=false\n tlayers = self.get_layers()\n\n model = TransformerLanguageModel(\n len(self.train_set.vocabulary), self.helper.args.embedding_size,\n self.helper.args.state_size, self.helper.args.dropout,\n tied_embedding=self.helper.args.tied_embedding,\n layers=tlayers, n_prev_states=self.helper.args.lm.trafo.context_blocks,\n n_prev_states_test=self.helper.args.lm.trafo.test_context_blocks,\n same_length_eval=self.helper.args.lm.trafo.same_length_eval,\n p_drop_layer=self.helper.args.transformer.p_drop_layer,\n same_length=self.helper.args.lm.trafo.same_length,\n use_last_state=self.helper.args.lm.trafo.last_layer_context,\n norm_before_output=self.is_preln())\n\n self.n_weights = sum(p.numel() for p in model.parameters())\n\n with torch.no_grad():\n if self.is_preln():\n model.embedding_scale = 1.0\n elif self.helper.args.lm.trafo.xl_init:\n self.fix_init(model)\n elif self.helper.args.lm.trafo.embedding_mode_init==\"scale_to_sqrt_dmodel\":\n norm = model.embedding.weight.norm(dim=-1).mean()\n model.embedding_scale = math.sqrt(self.helper.args.state_size) / norm\n elif self.helper.args.lm.trafo.embedding_mode_init==\"one_and_scale_to_sqrt_dmodel\":\n norm = model.embedding.weight.norm(dim=-1).mean()\n model.embedding_scale = math.sqrt(self.helper.args.state_size)\n model.embedding.weight.mul_(1.0 / norm)\n elif self.helper.args.lm.trafo.embedding_mode_init==\"init_to_sqrt_dmodel\":\n norm = model.embedding.weight.norm(dim=-1, keepdim=True)\n model.embedding_scale=1.0\n model.embedding.weight.mul_(math.sqrt(self.helper.args.state_size) / norm)\n\n self.visualizer = LayerVisualizer(model, {\n \"mha.plot_head_details\": self.helper.args.transformer.plot_head_details,\n \"mha.no_pos_vs_content\": True\n })\n\n self.input_history = []\n return model\n\n\n def train_step(self) -> Tuple[Result, Dict[str, Any]]:\n if self.helper.args.kvmem.norm_values:\n with torch.no_grad():\n for m in self.model.modules():\n if isinstance(m, torch.nn.EmbeddingBag):\n m.weight.div_(m.weight.norm(dim=-1, keepdim=True))\n\n return super().train_step()\n\n def get_optimizer_param_list(self):\n params = list(self.model.parameters())\n sel_params = []\n expert_params = []\n\n if self.helper.args.moe.sel_lr_multipler != 1.0:\n for m in self.model.modules():\n if isinstance(m, MoE):\n sel_params += [m.expert_sel]\n\n if self.helper.args.moe.expert_lr_multipler != 1.0:\n for m in self.model.modules():\n if isinstance(m, MoE):\n expert_params += [m.keys, m.values]\n\n excluded_params = [id(p) for p in sel_params + expert_params]\n params = [p for p in params if id(p) not in excluded_params]\n\n if not excluded_params:\n return params\n\n return [\n {\"params\": params},\n {\"params\": sel_params, \"lr\": self.helper.args.lr * 
self.helper.args.moe.sel_lr_multipler},\n {\"params\": expert_params, \"lr\": self.helper.args.lr * self.helper.args.moe.expert_lr_multipler},\n ]\n\n def validate_on_name(self, name: str) -> Tuple[Any, float]:\n self.validation_started_on = name\n self.validation_step = 0\n\n return super().validate_on_name(name)\n\n def get_steplabels(self, data: Dict[str, torch.Tensor]) -> List[str]:\n out = self.train_set.vocabulary(data[\"data\"][:, 0].cpu().numpy().tolist())\n inp = [self.train_set.vocabulary(x[:-1].cpu().numpy().tolist()) for x in self.input_history] + [out]\n return sum(inp, [])[:-1], out[1:]\n\n def run_model(self, data: Dict[str, torch.Tensor], ubatch: int = 0) -> Tuple[Result, Dict[str, Any]]:\n plot_now = ((ubatch == 0) and (self.helper.args.debug_plot_interval is not None) and \\\n ((self.helper.state.iter % self.helper.args.debug_plot_interval) == 0) and self.model.training)\n\n is_dumping = self.validation_started_on and self.helper.args.dump_validation_plots\n\n if plot_now or is_dumping:\n inp, outp = self.get_steplabels(data)\n params = {\"steplabel\": inp, \"target_labels\": outp}\n if self.helper.args.plot.n_steps:\n params[\"n_steps\"] = self.helper.args.plot.n_steps\n\n self.visualizer.prepare(params)\n\n if ubatch == 0 and self.helper.args.lm.trafo.context_blocks > 0:\n if len(self.input_history) >= self.helper.args.lm.trafo.context_blocks:\n self.input_history.pop(0)\n self.input_history.append(data[\"data\"][:, 0])\n\n res, plots = super().run_model(data, ubatch)\n\n if plot_now or is_dumping:\n plots.update({f\"activations/{k}\": v for k, v in self.visualizer.plot().items()})\n\n if is_dumping:\n os.makedirs(self.helper.args.dump_validation_plots, exist_ok=True)\n torch.save(plots, f\"{self.helper.args.dump_validation_plots}/{self.validation_started_on}_{self.validation_step:04d}.pth\")\n self.validation_step += 1\n\n return res, plots" }, { "identifier": "SimpleTask", "path": "tasks/simple/simple_task.py", "snippet": "class SimpleTask(Task):\n MAX_LENGHT_PER_BATCH = None\n train_set: torch.utils.data.Dataset\n train_loader: torch.utils.data.DataLoader\n model: torch.nn.Module\n\n def create_datasets(self):\n raise NotImplementedError()\n\n def create_model_interface(self):\n raise NotImplementedError()\n\n def create_model(self) -> torch.nn.Module:\n raise NotImplementedError()\n\n def create_state(self):\n pass\n\n @property\n def amp_enabled(self):\n return torch.cuda.is_available() and self.helper.args.amp\n\n @property\n def time_dim(self) -> int:\n return 1 - self.batch_dim\n\n def __init__(self, helper: framework.helpers.TrainingHelper):\n super().__init__(helper)\n\n self.avg_num_chunks = framework.utils.Average()\n self.reg_loss_average = framework.utils.DictAverage()\n self.max_grad = 0\n self.time_sum = 0\n\n self.create_datasets()\n self.create_loaders()\n self.model = self.create_model()\n self.model = self.model.to(self.helper.device)\n\n self.create_model_interface()\n self.create_optimizer()\n self.create_lr_scheduler()\n\n self.regularizer = LayerRegularizer(\n self.model, self.helper.args.stop_after, self.helper.args.reg_scales, self.helper.args.reg_lin_decay)\n\n self.scaler = torch.cuda.amp.GradScaler(enabled=self.amp_enabled)\n self.helper.saver[\"scaler\"] = self.scaler\n\n n_params = sum(p.numel() for p in self.model.parameters())\n print(f\"Total number of model parameters: {n_params}\")\n\n self.helper.saver[\"model\"] = self.model\n self.create_state()\n self.helper.restore()\n\n self.fetcher = None\n self.helper.log({\"n_params\": 
n_params})\n\n if self.helper.args.nan_detect:\n # based on https://discuss.pytorch.org/t/finding-source-of-nan-in-forward-pass/51153/3\n def nan_hook(self, inp, output):\n if not isinstance(output, tuple):\n outputs = [output]\n else:\n outputs = output\n\n for i, out in enumerate(outputs):\n def detect(out):\n nan_mask = ~torch.isfinite(out)\n if nan_mask.any():\n print(\"In\", self.__class__.__name__)\n raise RuntimeError(f\"Found non-finite in output {i} at indices: \", nan_mask.nonzero(), \"where:\", out[nan_mask.nonzero()[:, 0].unique(sorted=True)])\n\n U.apply_recursive(out, detect, torch.is_tensor)\n\n for submodule in self.model.modules():\n submodule.register_forward_hook(nan_hook)\n\n def fetch_thread(self):\n data = self.prepare_data(self.get_train_batch())\n n_chunks = self.get_n_chunks(data)\n d_chunks = self.chunk_batch_dim(data, n_chunks)\n\n return data, d_chunks\n\n def create_train_loader(self, loader: torch.utils.data.Dataset, seed: Optional[int] = None,\n batch_size: Optional[int] = None) -> torch.utils.data.DataLoader:\n\n return super().create_train_loader_bs(loader, batch_size or self.helper.args.batch_size, seed)\n\n def set_train_set(self, ds: torch.utils.data.Dataset, seed: Optional[int] = None):\n self.train_set = ds\n self.train_loader = self.create_train_loader(self.train_set, seed)\n self.data_iter = iter(self.train_loader)\n\n def create_loaders(self):\n self.train_loader = self.create_train_loader(self.train_set)\n self.valid_loaders = framework.data_structures.DotDict()\n self.valid_loaders.update({k: self.create_valid_loader(v) for k, v in self.valid_sets.items()})\n\n def get_optimizer_param_list(self):\n return self.model.parameters()\n\n def create_optimizer(self):\n if self.helper.args.optimizer in [\"adam\", \"adamw\"]:\n opt = torch.optim.Adam if self.helper.args.optimizer == \"adam\" else torch.optim.AdamW\n self.set_optimizer(opt(self.get_optimizer_param_list(), self.helper.args.lr,\n weight_decay=self.helper.args.wd, betas=self.helper.args.adam.betas,\n eps=self.helper.args.adam.eps))\n elif self.helper.args.optimizer == \"adagrad\":\n self.set_optimizer(torch.optim.Adagrad(self.get_optimizer_param_list(), self.helper.args.lr,\n weight_decay=self.helper.args.wd))\n elif self.helper.args.optimizer == \"sgd\":\n self.set_optimizer(torch.optim.SGD(self.get_optimizer_param_list(), self.helper.args.lr,\n weight_decay=self.helper.args.wd, momentum=0.9))\n else:\n assert False, f\"Unsupported optimizer: {self.helper.args.optimizer}\"\n\n def set_optimizer(self, optimizer: torch.optim.Optimizer):\n self.optimizer = optimizer\n self.helper.saver.register(\"optimizer\", self.optimizer, replace=True)\n\n def get_train_batch(self) -> Dict[str, Any]:\n return next(self.data_iter)\n\n def chunk_batch_dim(self, data: Dict[str, Any], n: int) -> List[Dict[str, Any]]:\n if n == 1:\n return [data]\n\n res = [{} for _ in range(n)]\n for k, v in data.items():\n assert torch.is_tensor(v), \"Only tensors are supported by autosplitting\"\n\n bd = self.batch_dim if self.batch_dim < v.ndimension() else 0\n assert v.shape[bd] % n == 0, f\"Batch (dim {bd} of input {k} of shape {v.shape} is not divisible by {n})\"\n\n for i, c in enumerate(v.chunk(n, dim=bd)):\n res[i][k] = c\n\n # Avoid unnecessary computation.\n if \"in\" in data and \"in_len\" in data:\n for r in res:\n r[\"in\"] = r[\"in\"].narrow(1 - self.batch_dim, 0, int(r[\"in_len\"].max().item()))\n\n if \"out\" in data and \"out_len\" in data and data[\"out\"].ndim > 1:\n for r in res:\n r[\"out\"] = 
r[\"out\"].narrow(1 - self.batch_dim, 0, int(r[\"out_len\"].max().item()))\n\n return res\n\n def is_seq2seq_task(self, data: Dict[str, Any]) -> bool:\n return \"in_len\" in data and \"out_len\" in data\n\n def get_seq_length(self, data: Dict[str, Any]) -> int:\n # This assumes separate encoder and decoder\n return max(data[\"in\"].shape[self.time_dim], data[\"out\"].shape[self.time_dim] if data[\"out\"].ndim > 1 else 0)\n\n def get_n_chunks(self, data: Dict[str, Any]) -> int:\n if self.helper.args.n_microbatch:\n return self.helper.args.n_microbatch\n\n max_length_per_batch = self.helper.args.max_length_per_batch or self.MAX_LENGHT_PER_BATCH\n if self.is_seq2seq_task(data) and max_length_per_batch:\n # The formula below assumes quadratic memory consumption\n return int(2**int(self.get_seq_length(data) / max_length_per_batch))\n return 1\n\n def post_backward(self) -> Dict[str, Any]:\n return {}\n\n def get_batch_size(self, data: Dict[str, Any]) -> int:\n for v in data.values():\n if torch.is_tensor(v) and v.ndim > self.batch_dim:\n return v.shape[self.batch_dim]\n\n raise ValueError(\"Unable to automatically determine the local batch size.\")\n\n def train_step(self) -> Tuple[Result, Dict[str, Any]]:\n plots = {}\n\n if self.helper.args.speedtest==\"iter\":\n torch.cuda.synchronize()\n\n with self.forward_time_meter:\n self.set_lr()\n self.optimizer.zero_grad(set_to_none=True)\n\n data, d_chunks = self.fetcher.get()\n\n res_list = []\n weights = []\n\n self.avg_num_chunks.add(len(d_chunks))\n\n total_batch_size = self.get_batch_size(data)\n\n profiler = None\n # if self.helper.state.iter == 3:\n # profiler = torch.profiler.profile(activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA], record_shapes=True, with_stack=True, profile_memory=True)\n # profiler.__enter__()\n\n\n call_pre_iter(self.model)\n for ubatch, d in enumerate(d_chunks):\n with torch.cuda.amp.autocast(enabled=self.amp_enabled):\n res, custom_plots = self.run_model(d, ubatch)\n call_before_loss(self.model)\n res_list.append(res)\n if ubatch == 0:\n plots.update(custom_plots)\n\n # weights for microbatch accumulation\n weights.append(self.get_batch_size(d) / total_batch_size)\n reg_loss, reg_log = self.regularizer.get(self.helper.state.iter)\n self.reg_loss_average.add(reg_log)\n total_loss = (res_list[-1].loss + reg_loss * self.helper.args.reg) * self.helper.get_loss_scaling()\n\n if not torch.isfinite(total_loss):\n for n, p in self.model.named_parameters():\n if not torch.isfinite(p).all():\n print(f\"Found non-finite weight {n}\")\n\n for n, p in self.model.named_buffers():\n if not torch.isfinite(p).all():\n print(f\"Found non-finite buffer {n}\")\n\n assert False, f\"Loss not finite ({total_loss})\"\n\n self.scaler.scale(total_loss * weights[-1]).backward()\n pbwout = self.post_backward()\n if ubatch == 0:\n plots.update(pbwout)\n\n if self.helper.dist_env.is_distributed:\n aops = []\n for p in self.model.parameters():\n if p.grad is None:\n continue\n aops.append(torch.distributed.all_reduce(p.grad.contiguous(), async_op=True))\n\n for a in aops:\n a.wait()\n\n\n call_post_iter(self.model)\n\n self.scaler.unscale_(self.optimizer)\n\n if self.helper.args.grad_clip:\n gn = torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.helper.args.grad_clip)\n self.max_grad = max(self.max_grad, gn)\n\n\n if self.helper.args.log_grad_norms:\n for n, p in self.model.named_parameters():\n plots[f\"grad_norms/{n}\"] = p.detach().norm().item()\n\n\n self.scaler.step(self.optimizer)\n 
self.scaler.update()\n\n self.helper.state.iter += 1\n res = res_list[0].__class__.merge(res_list, weights)\n\n if self.helper.args.speedtest in {\"iter\"}:\n torch.cuda.synchronize()\n\n if profiler is not None:\n profiler.__exit__(None, None, None)\n profiler.export_chrome_trace(\"trace_all.json\")\n assert False\n\n\n # if self.helper.state.iter % 20 == 0:\n\n if \"in_len\" in data:\n n_total_tokens = (data[\"in_len\"] + data[\"out_len\"]).sum()\n if self.helper.dist_env.is_distributed:\n torch.distributed.all_reduce(n_total_tokens)\n\n self.total_n_token_in_period += n_total_tokens\n\n return res, plots\n\n def plot(self, res: Result) -> Dict[str, Any]:\n res = super().plot(res)\n\n if self.helper.args.dump_logs and self.helper.dist_env.is_master():\n dump_logs(self.model, self.helper.get_storage_path(\"log_dumps\") + f\"/{self.helper.state.iter}\")\n\n if self.helper.state.iter % 20 == 1:\n res.update(get_logs(self.model))\n\n res[\"average_num_chunks\"] = self.avg_num_chunks.get()\n for k, v in self.reg_loss_average.get().items():\n res[f\"train/reg_loss/{k}\"] = v\n\n if self.helper.args.grad_clip:\n res[\"max_grad\"] = self.max_grad\n self.max_grad = 0\n\n return res\n\n def train(self):\n self.loss_average.reset()\n\n self.data_iter = iter(self.train_loader)\n self.fetcher = framework.helpers.StoppingParallelProducer(self.fetch_thread)\n\n try:\n while (self.helper.args.stop_after or 10e10) > self.helper.state.iter:\n self.load_time_meter.stop()\n\n res, plots = self.train_step()\n plots.update(self.plot(res))\n\n with self.plot_time_meter:\n self.helper.log(plots)\n\n self.load_time_meter.start()\n\n self.helper.tick()\n except self.fetcher.Stopped:\n pass" }, { "identifier": "LanguageModelInterface", "path": "interfaces/language_model_interface.py", "snippet": "class LanguageModelInterface(ModelInterface):\n def __init__(self, model: torch.nn.Module, batch_dim: int = 1, drop_state_prob: float = 0,\n dist_env: Optional[DistributedEnv] = None, save_state: bool = False,\n n_ubatches: Optional[int] = 1):\n super().__init__()\n self.model = model\n self.batch_dim = batch_dim\n self.drop_state_prob = drop_state_prob\n self.time_dim = 1 - self.batch_dim\n self.dist_env = dist_env\n self.save_state = save_state\n self.n_ubatches = n_ubatches or 1\n self.reset_state()\n\n def create_input(self, data: Dict[str, torch.Tensor]) -> torch.Tensor:\n return data[\"data\"].narrow(self.time_dim, 0, data[\"data\"].shape[self.time_dim] - 1)\n\n def decode_outputs(self, outputs: RecurrentResult) -> Any:\n return outputs.outputs\n\n def reset_state(self):\n self.state = [None] * self.n_ubatches\n\n def loss(self, net_out: torch.Tensor, target: torch.Tensor, log: bool) -> Tuple[torch.Tensor, Dict[str, Any]]:\n assert net_out.shape[:-1] == target.shape\n assert net_out.ndim == 3\n\n return F.cross_entropy(net_out.flatten(0, -2), target.flatten().long()), {}\n\n def create_target(self, data: Dict[str, torch.Tensor]) -> torch.Tensor:\n return data[\"data\"].narrow(self.time_dim, 1, data[\"data\"].shape[self.time_dim] - 1).contiguous()\n\n def __call__(self, data: Dict[str, torch.Tensor], iter: int, ubatch: int) -> Tuple[LanguageModelResult, Dict[str, Any]]:\n if self.model.training and self.drop_state_prob > 0 and random.random() < self.drop_state_prob:\n self.reset_state()\n\n if ubatch > 0 and not self.model.training:\n raise ValueError(\"Microbatching is not supported in eval time\")\n\n input = self.create_input(data)\n target = self.create_target(data)\n\n plots = {}\n res, state = self.model(input, 
target, self.state[ubatch])\n if isinstance(res, torch.nn.modules.adaptive._ASMoutput):\n loss = res.loss\n # res = res.outputs\n else:\n loss, plots = self.loss(res, target, iter % 100 == 0)\n\n self.state[ubatch] = U.apply_to_tensors(state, lambda x: x.detach())\n return LanguageModelResult(res, loss), plots\n\n def state_dict(self) -> Dict[str, Any]:\n if not self.save_state:\n return {}\n\n if self.dist_env is not None and self.dist_env.is_distributed:\n # Collect the state from all workers\n state = []\n for s in self.state:\n alist = [None] * self.dist_env.world_size\n s = torch.distributed.all_gather(alist, s)\n s = torch.cat(s, self.batch_dim)\n state.append(s)\n return {\"state\": state}\n else:\n return {\"state\": self.state}\n\n def load_state_dict(self, state: Dict[str, Any]):\n if not self.save_state:\n self.reset_state()\n return\n\n if len(self.state) != len(state[\"state\"]):\n print(f\"WARNING: Number of microbatches changed from {len(state['state'])} to {len(self.state)}. Resetting state.\")\n self.reset_state()\n return\n\n if self.dist_env is not None and self.dist_env.is_distributed:\n state_bs = state[\"state\"][0].shape[self.batch_dim]\n if state_bs % self.dist_env.world_size != 0:\n print(f\"WARNING: State batch size ({state_bs}) is not divisible by the number of workers ({self.dist_env.world_size}). Resetting state.\")\n self.reset_state()\n else:\n bs_per_worker = state_bs // self.dist_env.world_size\n self.state = [s.narrow(self.batch_dim, self.dist_env.local_rank * bs_per_worker, bs_per_worker) for s in state[\"state\"]]\n else:\n self.state = state[\"state\"]" } ]
import framework
import torch
import torch.nn
import torch.utils.data
import dataset
import random
from models import TransformerLanguageModel
from ... import task, args
from .transformer_lm_mixin import TransformerLMMixin
from ..simple_task import SimpleTask
from typing import Tuple, Any, Dict, List, Union
from interfaces import LanguageModelInterface
9,940
@args
def a(parser: framework.helpers.ArgumentParser):
    parser.add_argument("-lm.state_drop_probability", default=0.0)
    parser.add_argument("-lm.lstm_weight_drop", default=0.0)
    parser.add_argument("-lm.unroll", default=100)
    parser.add_argument("-lm.unroll_eval", default="none", parser=parser.int_or_none_parser)
    parser.add_argument("-lm.example_context", default=100)
    parser.add_argument("-lm.example_window", default=40)


@task()
@args
def a(parser: framework.helpers.ArgumentParser):
    parser.add_argument("-lm.state_drop_probability", default=0.0)
    parser.add_argument("-lm.lstm_weight_drop", default=0.0)
    parser.add_argument("-lm.unroll", default=100)
    parser.add_argument("-lm.unroll_eval", default="none", parser=parser.int_or_none_parser)
    parser.add_argument("-lm.example_context", default=100)
    parser.add_argument("-lm.example_window", default=40)


@task()
class Enwik8Transformer(TransformerLMMixin, SimpleTask):
3
2023-12-13 08:45:02+00:00
12k
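To make the record above easier to interpret, here is a minimal sketch of how such a row could be turned into a next-line completion example. It assumes the row is available as a plain dict keyed by the schema of this dump, and that the gold snippet index selects an entry of the context list; the helper names and the prompt layout are illustrative assumptions, not part of the dataset itself.

```python
from typing import Any, Dict


def build_completion_example(row: Dict[str, Any]) -> Dict[str, str]:
    """Assemble prompt/target from one row of this dump (illustrative only)."""
    # Assumed: the gold snippet index points into the context list, whose
    # "snippet" field holds the retrieved cross-file code shown above.
    gold_snippet = row["context"][row["gold_snippet_index"]]["snippet"]

    # Assumed prompt layout: retrieved snippet, then the file's imports,
    # then the cropped in-file code that the model must continue.
    prompt = "\n\n".join([gold_snippet, row["import_statement"], row["cropped_code"]])

    # The expected continuation is the single line stored as the next line
    # (here: the Enwik8Transformer class header).
    return {"prompt": prompt, "target": row["next_line"]}


def is_exact_match(predicted_line: str, row: Dict[str, Any]) -> bool:
    # Whitespace-insensitive comparison against the gold next line.
    return predicted_line.strip() == row["next_line"].strip()
```

Nothing in this sketch depends on the specific model code inside the snippets; it only illustrates how the fields of a record relate to one another.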
Q-Future/Q-Align
q_align/model/modeling_mplug_owl2.py
[ { "identifier": "MPLUGOwl2Config", "path": "q_align/model/configuration_mplug_owl2.py", "snippet": "class MPLUGOwl2Config(LlamaConfig):\n model_type = \"mplug_owl2\"\n def __init__(self, visual_config=None, **kwargs):\n if visual_config is None:\n self.visual_config = DEFAULT_VISUAL_CONFIG\n else:\n self.visual_config = visual_config\n \n super().__init__(\n **kwargs,\n )" }, { "identifier": "MplugOwlVisionConfig", "path": "q_align/model/configuration_mplug_owl2.py", "snippet": "class MplugOwlVisionConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`MplugOwlVisionModel`]. It is used to instantiate\n a\n mPLUG-Owl vision encoder according to the specified arguments, defining the model architecture. Instantiating a\n configuration defaults will yield a similar configuration to that of the mPLUG-Owl\n [x-plug/x_plug-llama-7b](https://huggingface.co/x-plug/x_plug-llama-7b) architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n Args:\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n intermediate_size (`int`, *optional*, defaults to 3072):\n Dimensionality of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n image_size (`int`, *optional*, defaults to 224):\n The size (resolution) of each image.\n patch_size (`int`, *optional*, defaults to 32):\n The size (resolution) of each patch.\n hidden_act (`str` or `function`, *optional*, defaults to `\"quick_gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. 
If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"` and `\"gelu_new\"` ``\"quick_gelu\"` are supported.\n layer_norm_eps (`float`, *optional*, defaults to 1e-5):\n The epsilon used by the layer normalization layers.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n initializer_factor (`float`, *optional*, defaults to 1):\n A factor for initializing all weight matrices (should be kept to 1, used internally for initialization\n testing).\n\n\n ```\"\"\"\n\n model_type = \"mplug_owl_vision_model\"\n\n def __init__(\n self,\n hidden_size=1024,\n intermediate_size=4096,\n projection_dim=768,\n num_hidden_layers=24,\n num_attention_heads=16,\n num_channels=3,\n image_size=448,\n patch_size=14,\n hidden_act=\"quick_gelu\",\n layer_norm_eps=1e-6,\n attention_dropout=0.0,\n initializer_range=0.02,\n initializer_factor=1.0,\n use_flash_attn=False,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.projection_dim = projection_dim\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.num_channels = num_channels\n self.patch_size = patch_size\n self.image_size = image_size\n self.initializer_range = initializer_range\n self.initializer_factor = initializer_factor\n self.attention_dropout = attention_dropout\n self.layer_norm_eps = layer_norm_eps\n self.hidden_act = hidden_act\n self.use_flash_attn = use_flash_attn\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> \"PretrainedConfig\":\n config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)\n\n # get the vision config dict if we are loading from MplugOwlConfig\n if config_dict.get(\"model_type\") == \"mplug-owl\":\n config_dict = config_dict[\"vision_config\"]\n\n if \"model_type\" in config_dict and hasattr(cls, \"model_type\") and config_dict[\"model_type\"] != cls.model_type:\n logger.warning(\n f\"You are using a model of type {config_dict['model_type']} to instantiate a model of type \"\n f\"{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.\"\n )\n\n return cls.from_dict(config_dict, **kwargs)" }, { "identifier": "MplugOwlVisualAbstractorConfig", "path": "q_align/model/configuration_mplug_owl2.py", "snippet": "class MplugOwlVisualAbstractorConfig(PretrainedConfig):\n model_type = \"mplug_owl_visual_abstract\"\n\n def __init__(\n self,\n num_learnable_queries=64,\n hidden_size=1024,\n num_hidden_layers=6,\n num_attention_heads=16,\n intermediate_size=2816,\n attention_probs_dropout_prob=0.,\n initializer_range=0.02,\n layer_norm_eps=1e-6,\n encoder_hidden_size=1024,\n grid_size=None,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.hidden_size = hidden_size\n self.num_learnable_queries = num_learnable_queries\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n self.encoder_hidden_size = encoder_hidden_size\n self.grid_size = grid_size if grid_size else 32\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> \"PretrainedConfig\":\n config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)\n\n # get the visual_abstractor config dict if we are loading from MplugOwlConfig\n if config_dict.get(\"model_type\") == \"mplug-owl\":\n config_dict = config_dict[\"abstractor_config\"]\n\n if \"model_type\" in config_dict and hasattr(cls, \"model_type\") and config_dict[\"model_type\"] != cls.model_type:\n logger.warning(\n f\"You are using a model of type {config_dict['model_type']} to instantiate a model of type \"\n f\"{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.\"\n )\n\n return cls.from_dict(config_dict, **kwargs)" }, { "identifier": "MplugOwlVisionModel", "path": "q_align/model/visual_encoder.py", "snippet": "class MplugOwlVisionModel(PreTrainedModel):\n main_input_name = \"pixel_values\"\n _no_split_modules = [\"MplugOwlVisionEncoderLayer\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.config = config\n self.hidden_size = config.hidden_size\n\n self.embeddings = MplugOwlVisionEmbeddings(config)\n self.encoder = MplugOwlVisionEncoder(config)\n self.post_layernorm = nn.LayerNorm(self.hidden_size, eps=config.layer_norm_eps)\n\n self.post_init()\n\n\n def forward(\n self,\n pixel_values: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, BaseModelOutputWithPooling]:\n r\"\"\"\n Returns:\n\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if pixel_values is None:\n raise ValueError(\"You have to specify pixel_values\")\n\n hidden_states = self.embeddings(pixel_values)\n\n encoder_outputs = self.encoder(\n inputs_embeds=hidden_states,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n last_hidden_state = encoder_outputs[0]\n last_hidden_state = self.post_layernorm(last_hidden_state)\n\n pooled_output = last_hidden_state[:, 0, :]\n pooled_output = self.post_layernorm(pooled_output)\n\n if not return_dict:\n return (last_hidden_state, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPooling(\n last_hidden_state=last_hidden_state,\n pooler_output=pooled_output,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n def get_input_embeddings(self):\n return self.embeddings" }, { "identifier": "MplugOwlVisualAbstractorModel", "path": "q_align/model/visual_encoder.py", "snippet": "class MplugOwlVisualAbstractorModel(PreTrainedModel):\n _no_split_modules = [\"MplugOwlVisualAbstractorLayer\"]\n def __init__(self, config, language_hidden_size):\n super().__init__(config)\n self.config = config\n\n self.encoder = MplugOwlVisualAbstractorEncoder(config)\n self.visual_fc = torch.nn.Linear(config.hidden_size, language_hidden_size)\n self.query_embeds = torch.nn.Parameter(torch.randn(1, config.num_learnable_queries, config.hidden_size))\n self.vit_eos = torch.nn.Parameter(torch.randn(1, 1, language_hidden_size))\n\n self.post_init()\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n def get_extended_attention_mask(\n self,\n attention_mask: torch.Tensor,\n input_shape: Tuple[int],\n device: torch.device,\n ) -> torch.Tensor:\n \"\"\"\n Makes broadcastable attention and causal masks so that future and masked tokens are ignored.\n\n Arguments:\n attention_mask (`torch.Tensor`):\n Mask with ones indicating tokens to attend to, zeros for tokens to ignore.\n input_shape (`Tuple[int]`):\n The shape of the input to the model.\n device: (`torch.device`):\n The device of the input to the model.\n\n Returns:\n `torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`.\n \"\"\"\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n if attention_mask.dim() == 3:\n extended_attention_mask = attention_mask[:, None, :, :]\n elif attention_mask.dim() == 2:\n # Provided a padding mask of dimensions [batch_size, seq_length]\n # - the model is an encoder, so make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]\n extended_attention_mask = attention_mask[:, None, None, :]\n else:\n raise ValueError(\n \"Wrong shape for input_ids (shape {}) or attention_mask (shape {})\".format(\n input_shape, attention_mask.shape\n )\n )\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n return extended_attention_mask\n\n def forward(\n self,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of:\n shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and\n value hidden states of the attention blocks. Can be used to speed up decoding. 
If `past_key_values` are\n used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key\n value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape\n `(batch_size, sequence_length)`.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n \n query_embeds = self.query_embeds.repeat(encoder_hidden_states.shape[0], 1, 1)\n embedding_output = query_embeds\n input_shape = embedding_output.size()[:-1]\n batch_size, seq_length = input_shape\n device = embedding_output.device\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n if attention_mask is None:\n attention_mask = torch.ones(\n (query_embeds.shape[0], query_embeds.shape[1]), dtype=torch.long, device=query_embeds.device\n )\n extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if encoder_hidden_states is not None:\n if type(encoder_hidden_states) == list:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()\n else:\n (\n encoder_batch_size,\n encoder_sequence_length,\n _,\n ) = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n\n if type(encoder_attention_mask) == list:\n encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]\n elif encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n past_key_values=past_key_values,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = sequence_output[:, 0, :]\n\n sequence_output = self.visual_fc(sequence_output)\n sequence_output = torch.cat([sequence_output, self.vit_eos.repeat(sequence_output.shape[0], 1, 1)], dim=1)\n\n return BaseModelOutputWithPooling(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n hidden_states=encoder_outputs.hidden_states,\n )" }, { "identifier": "replace_llama_modality_adaptive", "path": "q_align/model/modeling_llama2.py", "snippet": 
"def replace_llama_modality_adaptive():\n transformers.models.llama.configuration_llama.LlamaConfig = LlamaConfig\n transformers.models.llama.modeling_llama.LlamaAttention = LlamaAttention\n transformers.models.llama.modeling_llama.LlamaFlashAttention2 = LlamaFlashAttention2\n transformers.models.llama.modeling_llama.LlamaSdpaAttention = LlamaSdpaAttention\n transformers.models.llama.modeling_llama.LlamaDecoderLayer = LlamaDecoderLayer\n transformers.models.llama.modeling_llama.LlamaModel.forward = model_forward\n transformers.models.llama.modeling_llama.LlamaForCausalLM.forward = causal_model_forward" } ]
from abc import ABC, abstractmethod
from typing import List, Optional, Tuple, Union
from torch.nn import CrossEntropyLoss
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, CLIPImageProcessor, LlamaConfig, LlamaModel, LlamaForCausalLM
from transformers.modeling_outputs import CausalLMOutputWithPast
from .configuration_mplug_owl2 import MPLUGOwl2Config, MplugOwlVisionConfig, MplugOwlVisualAbstractorConfig
from .visual_encoder import MplugOwlVisionModel, MplugOwlVisualAbstractorModel
from .modeling_llama2 import replace_llama_modality_adaptive
from icecream import ic
from PIL import Image
from icecream import ic
import torch
import torch.nn as nn
import copy
import os
import sys
7,952
# Initialize weights and apply final processing self.post_init() def get_model(self): return self.model def score(self, images, task_: str = "quality", input_: str = "image", ): if not hasattr(self, "weight_tensor"): self.weight_tensor = torch.Tensor([5.,4.,3.,2.,1.]).half().to(self.device) prompt = "USER: How would you rate the {} of this {}?\n<|image|>\nASSISTANT: The {} of the {} is".format(task_, input_, input_, task_) if input_ == "image": images = [expand2square(img, tuple(int(x*255) for x in self.image_processor.image_mean)) for img in images] input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(self.device) with torch.inference_mode(): image_tensor = self.image_processor.preprocess(images, return_tensors="pt")["pixel_values"].half().to(self.device) output_logits = self(input_ids.repeat(image_tensor.shape[0], 1), images=image_tensor)["logits"][:,-1, self.preferential_ids_] return torch.softmax(output_logits, -1) @ self.weight_tensor else: video = [[expand2square(frame, tuple(int(x*255) for x in self.image_processor.image_mean)) for frame in vid] for vid in images] input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(self.device) with torch.inference_mode(): video_tensors = [self.image_processor.preprocess(vid, return_tensors="pt")["pixel_values"].half().to(self.model.device) for vid in video] output_logits = self(input_ids.repeat(len(video_tensors), 1), images=video_tensors)["logits"][:,-1, self.preferential_ids_] return torch.softmax(output_logits, -1) @ self.weight_tensor def forward( self, input_ids: torch.LongTensor = None, # modality_indicators: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, images: Optional[torch.FloatTensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict input_ids, modality_indicators, attention_mask, past_key_values, inputs_embeds, labels = \ self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, modality_indicators=modality_indicators, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) # Enable model/pipeline parallelism shift_labels = shift_labels.to(shift_logits.device) loss = 
loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs ): if past_key_values: input_ids = input_ids[:, -1:] # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, "images": kwargs.get("images", None), } ) return model_inputs AutoConfig.register("mplug_owl2", MPLUGOwl2Config) AutoModelForCausalLM.register(MPLUGOwl2Config, MPLUGOwl2LlamaForCausalLM)
# Copyright 2023 Haotian Liu & Qinghao Ye (Modified from LLaVA) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. dir_path = os.path.dirname(os.path.realpath(__file__)) sys.path.insert(0, dir_path) IGNORE_INDEX = -100 IMAGE_TOKEN_INDEX = -200 DEFAULT_IMAGE_TOKEN = "<|image|>" def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None): prompt_chunks = [tokenizer(chunk).input_ids if len(chunk) > 0 else [] for chunk in prompt.split(DEFAULT_IMAGE_TOKEN)] def insert_separator(X, sep): return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1] input_ids = [] offset = 0 if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id: offset = 1 input_ids.append(prompt_chunks[0][0]) for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)): input_ids.extend(x[offset:]) if return_tensors is not None: if return_tensors == 'pt': return torch.tensor(input_ids, dtype=torch.long) raise ValueError(f'Unsupported tensor type: {return_tensors}') return input_ids def expand2square(pil_img, background_color): width, height = pil_img.size if width == height: return pil_img elif width > height: result = Image.new(pil_img.mode, (width, width), background_color) result.paste(pil_img, (0, (width - height) // 2)) return result else: result = Image.new(pil_img.mode, (height, height), background_color) result.paste(pil_img, ((height - width) // 2, 0)) return result class MPLUGOwl2MetaModel: def __init__(self, config): super(MPLUGOwl2MetaModel, self).__init__(config) self.vision_model = MplugOwlVisionModel( MplugOwlVisionConfig(**config.visual_config["visual_model"]) ) self.visual_abstractor = MplugOwlVisualAbstractorModel( MplugOwlVisualAbstractorConfig(**config.visual_config["visual_abstractor"]), config.hidden_size ) def get_vision_tower(self): vision_model = getattr(self, 'vision_model', None) if type(vision_model) is list: vision_model = vision_model[0] return vision_model def get_visual_abstractor(self): visual_abstractor = getattr(self, 'visual_abstractor', None) if type(visual_abstractor) is list: visual_abstractor = visual_abstractor[0] return visual_abstractor class MPLUGOwl2MetaForCausalLM(ABC): @abstractmethod def get_model(self): pass def encode_images(self, images): image_features = self.get_model().vision_model(images).last_hidden_state image_features = self.get_model().visual_abstractor(encoder_hidden_states=image_features).last_hidden_state return image_features def prepare_inputs_labels_for_multimodal( self, input_ids, attention_mask, past_key_values, labels, images ): if images is None or input_ids.shape[1] == 1: if past_key_values is not None and images is not None and input_ids.shape[1] == 1: attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device) multiway_indices = torch.zeros_like(input_ids).long().to(self.device) return input_ids, multiway_indices, attention_mask, past_key_values, None, labels if 
type(images) is list or images.ndim == 5: concat_images = torch.cat([image for image in images], dim=0) image_features = self.encode_images(concat_images) split_sizes = [image.shape[0] for image in images] image_features = torch.split(image_features, split_sizes, dim=0) image_features = [x.flatten(0, 1) for x in image_features] else: image_features = self.encode_images(images) new_input_embeds = [] new_modality_indicators = [] new_labels = [] if labels is not None else None cur_image_idx = 0 for batch_idx, cur_input_ids in enumerate(input_ids): if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0: # multimodal LLM, but the current sample is not multimodal # FIXME: this is a hacky fix, for deepspeed zero3 to work half_len = cur_input_ids.shape[0] // 2 cur_image_features = image_features[cur_image_idx] cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids[:half_len]) cur_input_embeds_2 = self.get_model().embed_tokens(cur_input_ids[half_len:]) cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0], cur_input_embeds_2], dim=0) new_input_embeds.append(cur_input_embeds) cur_modality_indicators = torch.zeros(len(cur_input_embeds)).long().to(self.device) new_modality_indicators.append(cur_modality_indicators) if labels is not None: new_labels.append(labels[batch_idx]) cur_image_idx += 1 continue image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0] cur_new_input_embeds = [] cur_modality_indicators = [] if labels is not None: cur_labels = labels[batch_idx] cur_new_labels = [] assert cur_labels.shape == cur_input_ids.shape while image_token_indices.numel() > 0: cur_image_features = image_features[cur_image_idx] image_token_start = image_token_indices[0] cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start])) cur_new_input_embeds.append(cur_image_features) # Add modality indicator assert image_token_start == len(cur_input_ids[:image_token_start]) cur_modality_indicators.append(torch.zeros(len(cur_input_ids[:image_token_start])).long()) cur_modality_indicators.append(torch.ones(len(cur_image_features)).long()) if labels is not None: cur_new_labels.append(cur_labels[:image_token_start]) cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype)) cur_labels = cur_labels[image_token_start+1:] cur_image_idx += 1 cur_input_ids = cur_input_ids[image_token_start+1:] image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0] if cur_input_ids.numel() > 0: cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids)) cur_modality_indicators.append(torch.zeros(len(cur_input_ids)).long()) if labels is not None: cur_new_labels.append(cur_labels) cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds] cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0) new_input_embeds.append(cur_new_input_embeds) # Modality cur_modality_indicators = [x.to(device=self.device) for x in cur_modality_indicators] cur_modality_indicators = torch.cat(cur_modality_indicators, dim=0) new_modality_indicators.append(cur_modality_indicators) if labels is not None: cur_new_labels = torch.cat(cur_new_labels, dim=0) new_labels.append(cur_new_labels) if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds): max_len = max(x.shape[0] for x in new_input_embeds) # Embedding new_input_embeds_align = [] for cur_new_embed in new_input_embeds: cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], 
cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0) new_input_embeds_align.append(cur_new_embed) new_input_embeds = torch.stack(new_input_embeds_align, dim=0) # Modality new_modality_indicators_align = [] for cur_modality_indicator in new_modality_indicators: cur_new_embed = torch.cat((cur_modality_indicator, torch.zeros(max_len - cur_modality_indicator.shape[0], dtype=cur_modality_indicator.dtype, device=cur_modality_indicator.device)), dim=0) new_modality_indicators_align.append(cur_new_embed) new_modality_indicators = torch.stack(new_modality_indicators_align, dim=0) # Label if labels is not None: new_labels_align = [] _new_labels = new_labels for cur_new_label in new_labels: cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0) new_labels_align.append(cur_new_label) new_labels = torch.stack(new_labels_align, dim=0) # Attention Mask if attention_mask is not None: new_attention_mask = [] for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels): new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device) new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device) cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0) new_attention_mask.append(cur_new_attention_mask) attention_mask = torch.stack(new_attention_mask, dim=0) assert attention_mask.shape == new_labels.shape else: new_input_embeds = torch.stack(new_input_embeds, dim=0) new_modality_indicators = torch.stack(new_modality_indicators, dim=0) if labels is not None: new_labels = torch.stack(new_labels, dim=0) if attention_mask is not None: new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device) attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1) assert attention_mask.shape == new_input_embeds.shape[:2] return None, new_modality_indicators, attention_mask, past_key_values, new_input_embeds, new_labels class MPLUGOwl2LlamaModel(MPLUGOwl2MetaModel, LlamaModel): config_class = MPLUGOwl2Config def __init__(self, config: MPLUGOwl2Config): super(MPLUGOwl2LlamaModel, self).__init__(config) class MPLUGOwl2LlamaForCausalLM(LlamaForCausalLM, MPLUGOwl2MetaForCausalLM): config_class = MPLUGOwl2Config def __init__(self, config): super(LlamaForCausalLM, self).__init__(config) self.model = MPLUGOwl2LlamaModel(config) self.tokenizer = AutoTokenizer.from_pretrained("q-future/one-align") self.image_processor = CLIPImageProcessor.from_pretrained("q-future/one-align") self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.preferential_ids_ = [id_[1] for id_ in self.tokenizer(["excellent","good","fair","poor","bad"])["input_ids"]] # Initialize weights and apply final processing self.post_init() def get_model(self): return self.model def score(self, images, task_: str = "quality", input_: str = "image", ): if not hasattr(self, "weight_tensor"): self.weight_tensor = torch.Tensor([5.,4.,3.,2.,1.]).half().to(self.device) prompt = "USER: How would you rate the {} of this {}?\n<|image|>\nASSISTANT: The {} of the {} is".format(task_, input_, input_, 
task_) if input_ == "image": images = [expand2square(img, tuple(int(x*255) for x in self.image_processor.image_mean)) for img in images] input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(self.device) with torch.inference_mode(): image_tensor = self.image_processor.preprocess(images, return_tensors="pt")["pixel_values"].half().to(self.device) output_logits = self(input_ids.repeat(image_tensor.shape[0], 1), images=image_tensor)["logits"][:,-1, self.preferential_ids_] return torch.softmax(output_logits, -1) @ self.weight_tensor else: video = [[expand2square(frame, tuple(int(x*255) for x in self.image_processor.image_mean)) for frame in vid] for vid in images] input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(self.device) with torch.inference_mode(): video_tensors = [self.image_processor.preprocess(vid, return_tensors="pt")["pixel_values"].half().to(self.model.device) for vid in video] output_logits = self(input_ids.repeat(len(video_tensors), 1), images=video_tensors)["logits"][:,-1, self.preferential_ids_] return torch.softmax(output_logits, -1) @ self.weight_tensor def forward( self, input_ids: torch.LongTensor = None, # modality_indicators: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, images: Optional[torch.FloatTensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict input_ids, modality_indicators, attention_mask, past_key_values, inputs_embeds, labels = \ self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, modality_indicators=modality_indicators, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) # Enable model/pipeline parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, 
**kwargs ): if past_key_values: input_ids = input_ids[:, -1:] # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, "images": kwargs.get("images", None), } ) return model_inputs AutoConfig.register("mplug_owl2", MPLUGOwl2Config) AutoModelForCausalLM.register(MPLUGOwl2Config, MPLUGOwl2LlamaForCausalLM)
replace_llama_modality_adaptive()
5
2023-12-14 03:36:30+00:00
12k
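The score() method in the mPLUG-Owl2 row above collapses the last-token logits of the five rating words into a single quality score. The short sketch below replays only that reduction with invented logit values; in the model they come from logits[:, -1, self.preferential_ids_] after the image or video forward pass.

import torch

# Hypothetical last-token logits for ["excellent", "good", "fair", "poor", "bad"]
# (values invented for illustration).
logits = torch.tensor([[2.1, 1.3, 0.2, -0.8, -1.5]])
weights = torch.tensor([5.0, 4.0, 3.0, 2.0, 1.0])   # the same 5..1 weight vector

# Softmax over the five candidate words, then a weighted sum -> MOS-like scalar in [1, 5].
score = torch.softmax(logits, dim=-1) @ weights
print(score)   # about 4.4 here, since most probability mass sits on "excellent"/"good"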
nox-410/tvm.tl
python/tvm/topi/hexagon/qnn/avg_pool2d.py
[ { "identifier": "get_layout_transform_fn", "path": "python/tvm/topi/hexagon/utils.py", "snippet": "def get_layout_transform_fn(layout):\n \"\"\"Return index map function as per the layout string\"\"\"\n if layout == \"nhwc-8h2w32c2w-2d\":\n return nhwc_8h2w32c2w_2d\n if layout == \"nhwc-8h2w32c2w-1d\":\n return nhwc_8h2w32c2w_1d\n if layout == \"nchw-8h2w32c2w-2d\":\n return nchw_8h2w32c2w_2d\n if layout == \"n11c-1024c-2d\":\n return n11c_1024c_2d\n if layout == \"n11c-1024c-1d\":\n return n11c_1024c_1d\n if layout == \"nhwc-1024c-2d\":\n return nhwc_1024c_2d\n if layout == \"nc11-1024c-2d\":\n return nc11_1024c_2d\n if layout == \"nc-1024-2d\":\n return nc_1024_2d\n if layout == \"nhw-32h16w-2d\":\n return nhw_32h16w_2d\n if layout == \"nhwc-4h4w32c-2d\":\n return nhwc_4h4w32c_2d\n if layout == \"nhwc-4h4w32c-1d\":\n return nhwc_4h4w32c_1d\n if layout == \"nc-512c-2d\":\n return nc_512c_2d\n if layout == \"nc-512c-1d\":\n return nc_512c_1d\n if layout == \"nhwc-4h2w32c2w-2d\":\n return nhwc_4h2w32c2w_2d\n if layout == \"nc-2048c-1d\":\n return nc_2048c_1d\n if layout == \"nc-2048c-2d\":\n return nc_2048c_2d\n if layout == \"nc-1024c-2d\":\n return nc_1024c_2d\n if layout == \"nc-1024c-1d\":\n return nc_1024c_1d\n if layout == \"iohw-16i32o2i-1d\":\n return iohw_16i32o2i_1d\n if layout == \"nhwc-2048c-2d\":\n return nhwc_2048c_2d\n if layout == \"nc-2048-2d\":\n return nc_2048_2d\n if layout == \"nc-2048c-2d\":\n return nc_2048c_2d\n if layout == \"nhwc-8h8w32c-2d\":\n return nhwc_8h8w32c_2d\n if layout == \"nhwc-8h8w32c-1d\":\n return nhwc_8h8w32c_1d\n if layout == \"nchw-8h8w32c-2d\":\n return nchw_8h8w32c_2d\n if layout == \"n11c-2048c-2d\":\n return n11c_2048c_2d\n if layout == \"n11c-2048c-1d\":\n return n11c_2048c_1d\n if layout == \"ohwi32o-1d\":\n return ohwi32o_1d\n if layout == \"nc11-2048c-2d\":\n return nc11_2048c_2d\n if layout == \"ncw-32c64w-2d\":\n return ncw_32c64w_2d\n if layout == \"nchw-32c8h8w-2d\":\n return nchw_32c8h8w_2d\n if layout == \"nchw-32c8h4w-2d\":\n return nchw_32c8h4w_2d\n if layout == \"nchw-8h8w32c-2d\":\n return nchw_8h8w32c_2d\n raise RuntimeError(f\"Unexpected layout '{layout}'\")" }, { "identifier": "get_fixed_point_value", "path": "python/tvm/topi/hexagon/utils.py", "snippet": "def get_fixed_point_value(flp: float, dtype: str = \"int16\") -> Tuple[int, int]:\n \"\"\"\n Return fixed-point value and the corresponding log2 of the scale factor used to compute\n this value.\n\n Parameters\n ----------\n flp : float\n Floating-point value to be converted\n dtype : str\n Type of the resulting fixed-point value. By default, it's set to \"int16\"\n\n Returns\n -------\n fixed_point_value : int\n Fixed-point value for the given floating-point value\n exp_scale_factor : int\n log2 of the scale factor\n\n Convert floating-point value into fixed-point number. This is done by\n multiplying the value by a scaling factor and then rounding it to the nearest\n integer value.\n\n As per IEEE-754 standard, a floating-point value can be represented as follows\n [see: https://en.wikipedia.org/wiki/IEEE_754-1985]:\n (-1)^S * M * 2^(E-Bias)\n\n Here,\n * S is the signed bit (0 or 1).\n * M is the mantissa. It's composed of an implicit 1 for the normalized floating-point\n values or 0 for the denormalized values, and the fraction part. This ensures that\n mantissa is always within [0, 2) range. 
Please note that this function doesn't\n handle denormalized values.\n * E is the exponent.\n\n In single precision, 23 bits are used to represent the fraction part of\n the mantissa (and therefore, '23' shows up in one of the computations below) and\n 8 bits are used for the exponent. Since exponent field needs to reperesent both\n positive and negative values, a bias (127 for single precision) is added to the actual\n value. Therefore, to compute the actual exponent, 127 must be subtracted from the stored\n value.\n\n As mentioned above, to find the corresponding fixed-point number, we multiply the\n value with a scaling factor and then round it to the nearest integer. The scaling factor\n is chosen to be a power for 2 and it's the largest value that can be safely multiplied\n to the floating-point value, without causing the resulting value to overflow the range\n of the integer type used to represent the fixed-point value.\n\n So, if we assume the scaling factor to be 2^x, the resulting fixed-point value will be:\n round((-1)^S * (M) * 2^(E-Bias) * 2^x)\n\n This can be simplified to:\n round((-1)^S * M * 2^(E-Bias+x)\n\n Now, if 'int16' is used for fixed-point value, then it has to be >= -(2 * 2^14)\n and <= (2 * 2^14) - 1. Since M (Mantissa) is always < 2, in order for the fixed-point value\n to be within this range, 2^(E - Bias + x) must be <= 2^14 - 1.\n And, if we ignore -1, (E - Bias + x) should be <= 14. Note: if mantissa gets too close to 2,\n this will cause the resulting value to go out of range and require it to be saturated.\n In the following implementation, we perform range check and adjust the scale to avoid\n saturation.\n For most cases, 2^x, where x = 14 - (E - Bias) or 14 - (E - 127) for single precision, is the\n best scaling factor for 'int16' type that can be used to convert the floating-point value to\n fixed-point with the least amount of precision loss.\n\n\n Here is a more rigorous explanation of the above, for non-negative scale values, which are of\n interest. M < 2, so M * 2^(E-Bias+x) < 2 ^ (E-Bias+x+1) [Note: LHS is a fraction, RHS int]\n => round(M * 2^(E-Bias+x)) <= 2 ^ (E-Bias+x+1) [Note the \"<=\", not \"<\"]\n We want x s.t. round(M * 2^(E-Bias+x)) <= 2^15 - 1\n We know round(M * 2^(E-Bias+x)) <= 2^(E-Bias+x+1)\n It will be sufficient to choose x s.t. 2^(E-Bias+x+1) <= 2^15 - 1\n That is, max x. s.t. 2^(E-Bias+x+1) < 2^15\n E-Bias+x+1 < 15\n E-Bias+x+1 <= 14\n Max x will make E-Bias+x+1 = 14\n x = 13 - E + Bias\n\n Additonal notes on various floating-point values:\n ------------------------------------------------\n 1) Denormalized values: causes assertion failure. The problem with the denormalized values\n is that they require a very large scale factor (>= 2^127) to be converted to a fixed-point\n value. 
As the denormalzied values get smaller, the scale factor becomes too large to be\n represented as a IEEE-754 floating point value (as being done in the computaton below)\n and therefore, the denormalized values aren't being handled here.\n 2) NaN and INF: assertion failure\n \"\"\"\n\n def within_range(val, dtype):\n if dtype == \"int16\":\n return -32768 <= val <= 32767\n raise RuntimeError(f\"Unsupported dtype, {dtype}'\")\n\n # Make sure that 'flp' isn't NaN or infinity\n if math.isnan(flp) or math.isinf(flp):\n raise RuntimeError(\"NaN or INF can not be represented as fixed-point\")\n\n flp_f = struct.pack(\"f\", flp)\n flp_i = struct.unpack(\"I\", flp_f)\n exp_stored_value = (flp_i[0] >> 23) & 0xFF\n\n if exp_stored_value == 0:\n raise RuntimeError(\n \"Denormalized values are not considered for float -> fixed-point conversion!\"\n )\n\n exp_value = ((flp_i[0] >> 23) & 0xFF) - 127\n if dtype == \"int16\":\n max_bits = 14\n else:\n raise RuntimeError(f\"Unsupported dtype, {dtype}'\")\n\n exp_scale_factor = max_bits - exp_value # log2 of the scale_factor\n\n if exp_scale_factor > 127:\n raise RuntimeError(\"Value too small for fixed-point conversion!\")\n\n # Scaling factor = 2^exp_scale_factor\n # Since exp_scale_factor can be -ve or +ve, scaling factor is calculated by first\n # representing the value in the binary format as per IEEE floating-point standand and then\n # reinterpreting it as a float using struct.pack and struct.unpack functions.\n # struct.pack returns a bytes object packed as integer and struct.unpack\n # unpacks this bytes object into float.\n scale = ((exp_scale_factor + 127) & 0xFF) << 23\n scale_i = struct.pack(\"I\", scale)\n scale_f = struct.unpack(\"f\", scale_i)\n fixed_point_value = int(round(flp * scale_f[0]))\n\n if not within_range(fixed_point_value, dtype):\n # Adjust scale factor to avoid overflow.\n exp_scale_factor -= 1\n scale = ((exp_scale_factor + 127) & 0xFF) << 23\n scale_i = struct.pack(\"I\", scale)\n scale_f = struct.unpack(\"f\", scale_i)\n fixed_point_value = int(round(flp * scale_f[0]))\n\n return fixed_point_value, exp_scale_factor" }, { "identifier": "is_scalar", "path": "python/tvm/topi/hexagon/utils.py", "snippet": "def is_scalar(expr):\n if isinstance(expr, te.Tensor):\n return expr.ndim == 0 and (isinstance(expr.op.body[0], (tir.FloatImm, tir.IntImm)))\n return isinstance(expr, (tir.FloatImm, tir.IntImm))" }, { "identifier": "get_const_int_value", "path": "python/tvm/topi/hexagon/utils.py", "snippet": "def get_const_int_value(expr):\n if isinstance(expr, te.Tensor):\n assert isinstance(expr.op.body[0], tir.IntImm)\n return expr.op.body[0].value\n return tvm.topi.utils.get_const_int(expr)" }, { "identifier": "get_const_float_value", "path": "python/tvm/topi/hexagon/utils.py", "snippet": "def get_const_float_value(expr):\n if isinstance(expr, te.Tensor):\n assert isinstance(expr.op.body[0], tir.FloatImm)\n return expr.op.body[0].value\n return tvm.topi.utils.get_const_float(expr)" }, { "identifier": "get_const_tuple", "path": "python/tvm/topi/utils.py", "snippet": "def get_const_tuple(in_tuple):\n \"\"\"Verifies input tuple is IntImm or Var, returns tuple of int or Var.\n\n Parameters\n ----------\n in_tuple : tuple of Expr\n The input.\n\n Returns\n -------\n out_tuple : tuple of int\n The output.\n \"\"\"\n ret = []\n ana = None\n for elem in in_tuple:\n if isinstance(elem, (tvm.tir.Var, tvm.tir.expr.Any)):\n ret.append(elem)\n elif not isinstance(elem, (tvm.tir.IntImm, int)):\n ana = tvm.arith.Analyzer() if ana is None else ana\n elem 
= ana.simplify(elem)\n if not isinstance(elem, tvm.tir.IntImm):\n ret.append(elem)\n else:\n ret.append(get_const_int(elem))\n else:\n ret.append(get_const_int(elem))\n return tuple(ret)" }, { "identifier": "get_pad_tuple", "path": "python/tvm/topi/nn/utils.py", "snippet": "def get_pad_tuple(padding, kernel):\n \"\"\"Common code to get the pad option\n\n Parameters\n ----------\n padding : int or str\n Padding size, or ['VALID', 'SAME']\n\n kernel : tuple of int\n Conv kernel size\n\n Returns\n -------\n pad_top : int\n Padding size on top\n\n pad_left : int\n Padding size on left\n\n pad_down : int\n Padding size on down.\n\n pad_right : int\n Padding size on right.\n \"\"\"\n # compute the padding size\n if isinstance(padding, (tuple, list)):\n if len(padding) == 2:\n pad_h = padding[0] * 2\n pad_w = padding[1] * 2\n elif len(padding) == 4:\n return padding[0], padding[1], padding[2], padding[3]\n else:\n raise ValueError(\"Size of padding can only be 2 or 4\")\n elif isinstance(padding, int):\n pad_h = pad_w = padding * 2\n elif padding == \"VALID\":\n pad_h = 0\n pad_w = 0\n elif padding == \"SAME\":\n pad_h = kernel[0] - 1\n pad_w = kernel[1] - 1\n else:\n raise ValueError(f\"Unknown padding option {padding}\")\n pad_top = (pad_h + 1) // 2\n pad_left = (pad_w + 1) // 2\n return pad_top, pad_left, pad_h - pad_top, pad_w - pad_left" }, { "identifier": "pad", "path": "python/tvm/topi/nn/pad.py", "snippet": "@tvm.te.tag_scope(tag=tag.INJECTIVE + \",pad\")\ndef pad(data, pad_before, pad_after=None, pad_value=0.0, name=\"PadInput\", attrs=None):\n \"\"\"Pad Input with zeros.\n\n Parameters\n ----------\n data : tvm.te.Tensor\n n-D input, can be any layout.\n\n pad_before : list / tuple of n ints\n Pad width on each dimension to pad the before the axis begin.\n\n pad_after : list / tuple of n ints, optional\n Pad width each dimension to pad the after the axis end.\n\n pad_value : float, optional\n The value to be padded.\n\n name : str, optional\n The name prefix operators generated\n\n Returns\n -------\n Output : tvm.te.Tensor\n n-D, the same layout as Input.\n \"\"\"\n n = len(data.shape)\n pad_after = pad_after if pad_after else pad_before\n if len(pad_before) != n:\n raise ValueError(f\"Input dimension and pad_before dismatch : {n} vs {len(pad_before)}\")\n if len(pad_after) != n:\n raise ValueError(f\"Input dimension and pad_after dismatch : {n} vs {len(pad_after)}\")\n ana = tvm.arith.Analyzer()\n dshape = []\n for dim in data.shape:\n if isinstance(dim, tvm.tir.Any):\n dshape.append(tvm.te.size_var(\"dim\"))\n else:\n dshape.append(dim)\n out_shape = tuple(ana.simplify(dshape[i] + pad_before[i] + pad_after[i]) for i in range(n))\n pad_value = (\n pad_value\n if isinstance(pad_value, tvm.tir.PrimExpr)\n else tvm.tir.const(pad_value, data.dtype)\n )\n\n def _pad(*indices):\n not_zero = []\n index_tuple = []\n for i in range(n):\n if equal_const_int(pad_before[i], 0) and equal_const_int(pad_after[i], 0):\n index_tuple.append(indices[i])\n else:\n index_tuple.append(indices[i] - pad_before[i])\n not_zero.append(indices[i] >= pad_before[i])\n not_zero.append(indices[i] < data.shape[i] + pad_before[i])\n if not_zero:\n not_zero = tvm.tir.all(*not_zero)\n return tvm.tir.if_then_else(not_zero, data(*index_tuple), pad_value)\n return data(*index_tuple)\n\n return te.compute(out_shape, _pad, name=name, attrs=attrs)" }, { "identifier": "compute_PoolArea", "path": "python/tvm/topi/hexagon/compute_poolarea.py", "snippet": "def compute_PoolArea(i, j, ih, iw, kh, kw, sh, sw, dh, dw, pad_top, 
pad_left):\n \"\"\"\n Parameters\n ----------\n i,j:\n index of output tensor along H and W axis\n This is equal to the starting point of the sliding window for which the average is computed\n ih, iw:\n input data size along H and W axis\n kh, kw:\n Kernel size along H and W axis\n sh, sw:\n Stride size along H and W axis\n dh, dw:\n Dilation size along H and W axis\n pad_top, pad_left:\n Pad size on Top and left side of input data\n\n # PoolArea refers to the area of that portion of each sliding window which only includes\n # the input data and not the padded area.\n\n # Motivation: The following example shows the location of the first sliding window (at i=0, j=0)\n # on a 6*6 array, with kernel=[3,3] and padding=[1, 1, 1, 1].\n # The input data elements are shown with (X) and padding data with (0).\n # As shown, the number of non-padding elements that should be used for computing\n # the average of values inside this window is 4, while the windows area is 3*3=9.\n # To compute the PoolArea, we have to move the top/left edge of the window down/right\n # to exclude zero-padding elements. The edge adjustment can be formulated as\n # top_edge = max(i , pad_top)\n # left_edge= max(j , pad_left)\n # Note that pad_top and pad_left represent point 0 of the input data along i and j direction.\n # In this example, bottom_edge and right_edge of the PoolArea do not need any adjustment,\n # because there is no padding data on those side of the window.\n # However, as we slide the window down and to the right, the window might go\n # beyond the input data boundaries (ih and iw). In these cases, bottom/right edge should be\n # moved up/left to be located inside the input data.\n # This can be formulated as\n # bottom_edge = min(i + kh, ih + pad_top)\n # left_edge = min(j + kw, iw + pad_left)\n # Having all the edges,\n # PoolArea = (bottom_edge - top_edge) * (right_edge - left_edge)\n\n # _______\n # |0 0 0|0 0 0 0 0 0 0 0 0 0 0 0 0\n # | | _______\n # |0 X X|X X X X 0 |0 X X|X X X X 0\n # | | | |\n # |0 X X|X X X X 0 ====> |0 X X|X X X X 0\n # |_____| |_____|\n # 0 X X X X X X 0 0 X X X X X X 0\n # 0 X X X X X X 0 0 X X X X X X 0\n # 0 X X X X X X 0 0 X X X X X X 0\n # 0 X X X X X X 0 0 X X X X X X 0\n # 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n\n\n # The above equations are derived under the assumption of having default value (1)\n # for stride and dilation. 
However, we need to expand them to support non-default\n # stride and dilation values.\n # Stride impacts the starting location of the sliding windows, so i and j should be\n # replaced by (i * sh) and j by (j * sw) in the equations.\n # Dilation changes the window size, making k kernel elements scattered into a d*(k - 1) + 1\n # window.\n # Non-1 dilation means that, we need to divide the adjusted window size by the dilation value\n # to find out how many kernel elements inside the sliding window are inside the input data\n # boundaries:\n # top_edge= max(i * sh , pad_top)\n # left_edge= max(j * sw , pad_left)\n # bottom_edge = min(i * sh + (kh - 1) * dh + 1, ih + pad_top)\n # left_edge = min(j * sw + (kw - 1) * dw + 1, data_w + pad_left)\n # PoolArea = ceil_div((bottom_edge - top_edge), dh) * ceil_div((right_edge - left_edge), dw)\n #\n # Finally, we need to address one corner case related to the non-default dilation:\n # Consider the following example along W axis, where iw = 3, kw = 3 and dw = 2.\n # The first figure on the left shows the sliding window of size 5 starting at index 0,\n # and the first figure on the right shows the same example with sliding window at index 1.\n # The second row of figures show the PoolArea after adjusting the edges\n # (both left_edge - right_edge = 3)\n # The third row of figures show the location of dialated kernel points(*).\n # As shown, although the distance between left and right edge in both cases is 3 and\n # dilation is 2 and ceil_div(3,2)=2, the right PoolArea only includes 1 kernel point.\n\n # Sliding Window: |0 0 X X X |0 0 |0 X X X 0|\n # PoolArea(after edge adjustment): 0 0|X X X |0 0 0|X X X| 0\n # location of dilated kernel points: * 0|* X * |0 0 *|X * X| 0\n # PoolArea (dilated_point_aware): * 0|* X * |0 0 * X|* X| 0\n\n # To address this issue, instead of moving the left_edge to bring it just inside the input\n # data boundary, we should move the edge to the right untill we get to the first dilated kernel\n # point inside the input data boundary.\n # The third row of figures shows how this row adjustment can solve the problem.\n # So the problem is reduced to finding the first dilated kernel point inside the data\n # boundary.# For that, we can find the number of dialted points which are mapped to the padded\n # area and find the location of the next one which should be inside the input data:\n # num_of_prev_points = (pad_top - i * sh - 1) // dh\n # next_point_index = i * sh + (num_prev_points + 1) * dh\n #\n # With that, Top_edge and left_edge can be reformulated as:\n # if i*sh - pad_top < 0:\n # top_edge = i * sh + ((pad_top - i * sh - 1) // dh + 1) * dh\n # else:\n # top_edge = i * sh\n #\n # if j * sw - pad_left < 0:\n # left_edge = j * sw + ((pad_left - j * sw - 1) // dw + 1) * dw\n # else:\n # left_edge= j * sw\n\n \"\"\"\n top_edge = tir.if_then_else(\n tir.all(i * sh - pad_top < 0), i * sh + ((pad_top - i * sh - 1) // dh + 1) * dh, i * sh\n )\n bottom_edge = te.min(i * sh + (kh - 1) * dh + 1, ih + pad_top)\n left_edge = tir.if_then_else(\n tir.all(j * sw - pad_left < 0), j * sw + ((pad_left - j * sw - 1) // dw + 1) * dw, j * sw\n )\n right_edge = te.min(j * sw + (kw - 1) * dw + 1, iw + pad_left)\n return -((bottom_edge - top_edge) // -dh) * -((right_edge - left_edge) // -dw)" } ]
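As a concrete instance of the conversion derived in the get_fixed_point_value docstring above, here is the arithmetic for flp = 0.3 with an int16 target. This is only a sketch of the same steps; it skips the NaN/denormal checks and the saturation fallback of the real helper.

import struct

flp = 0.3
bits = struct.unpack("I", struct.pack("f", flp))[0]
exp_value = ((bits >> 23) & 0xFF) - 127       # unbiased exponent: -2, since 0.3 = 1.2 * 2^-2
exp_scale_factor = 14 - exp_value             # 16 = log2 of the scale factor for int16
scale = 2.0 ** exp_scale_factor               # 65536.0
fixed_point_value = int(round(flp * scale))   # round(19660.8) = 19661, inside [-32768, 32767]

# Reconstructing the float from the pair loses less than 2^-16 of precision.
assert abs(fixed_point_value / scale - flp) < 2.0 ** -exp_scale_factor
print(fixed_point_value, exp_scale_factor)    # 19661 16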
import tvm from tvm import te from tvm import tir from ..utils import ( get_layout_transform_fn, get_fixed_point_value, is_scalar, get_const_int_value, get_const_float_value, ) from ...utils import get_const_tuple from ...nn.utils import get_pad_tuple from ...nn.pad import pad from ..compute_poolarea import compute_PoolArea
8,643
def qnn_avg_pool2d_NHWC( data: te.Tensor, kernel: list, stride: list, padding: list, dilation: list, count_include_pad: bool, oshape: list, odtype: str, # quantization params: input_scale: float, input_zero_point: int, output_scale: float, output_zero_point: int, ): """Compute for quantized avg_pool2d""" kh, kw = kernel rh = te.reduce_axis((0, kh), name="rh") rw = te.reduce_axis((0, kw), name="rw") temp_dtype = get_temp_dtype(kh, kw, odtype) sh, sw = stride dh, dw = dilation scale = input_scale / output_scale scale_fixed_point, rsh = get_fixed_point_value(scale, "int16") corr = (output_zero_point << rsh) - input_zero_point * scale_fixed_point dilated_kh = (kh - 1) * dh + 1 dilated_kw = (kw - 1) * dw + 1 # Compute Area pad_top, pad_left, pad_down, pad_right = get_pad_tuple( get_const_tuple(padding), (dilated_kh, dilated_kw) ) # DOPAD if pad_top != 0 or pad_down != 0 or pad_left != 0 or pad_right != 0: pad_before = (0, pad_top, pad_left, 0) pad_after = (0, pad_down, pad_right, 0) data_pad = pad(data, pad_before, pad_after, pad_value=input_zero_point, name="data_pad") else: # By definition when True, zero-padding will be included in the averaging calculation # This is equivalent to PoolArea = (kh * kw) count_include_pad = True data_pad = data Sum = te.compute( oshape, lambda b, h, w, c: te.sum( data_pad[b, h * sh + dh * rh, w * sw + dw * rw, c].astype(temp_dtype), axis=[rh, rw] ), name="pool_sum", ) if not count_include_pad: # Compute PoolArea using unpadded input tensor _, oh, ow, _ = oshape _, ih, iw, _ = data.shape PoolArea = te.compute( (oh, ow), lambda i, j: compute_PoolArea(i, j, ih, iw, kh, kw, sh, sw, dh, dw, pad_top, pad_left), name="pool_area", ) ScaleWithArea = te.compute( (oh, ow), lambda i, j: tir.if_then_else( tir.all(PoolArea[i, j] > 0), (scale_fixed_point // PoolArea[i, j]).astype("int32"), 0, ), name="scale_with_area", ) Avg = te.compute( oshape, lambda b, h, w, c: saturate( ((Sum[b, h, w, c] * ScaleWithArea[h, w]) + corr + (1 << (rsh - 1))) >> rsh, odtype ).astype(odtype), name="pool_avg", ) else: ScaleWithArea = scale_fixed_point // (kh * kw) Avg = te.compute( oshape, lambda b, h, w, c: saturate( ((Sum[b, h, w, c] * ScaleWithArea) + corr + (1 << (rsh - 1))) >> rsh, odtype ).astype(odtype), name="pool_avg", ) return Avg def qnn_avg_pool2d_wrapper_compute_NCHW( data: te.Tensor, kernel: list, stride: list, padding: list, dilation: list, count_include_pad: bool, oshape: list, odtype: str, # quantization params: input_scale: float, input_zero_point: int, output_scale: float, output_zero_point: int, ): """Extract qnn params""" if ( is_scalar(input_scale) and is_scalar(output_scale) and is_scalar(input_zero_point) and is_scalar(output_zero_point) ):
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name, unused-variable, unused-argument, too-many-locals """ Compute and schedule for quantized avg_pool2d op """ def saturate(x: te.Tensor, dtype: str): """Saturate value for the specified data type""" return te.max(te.min_value(dtype), te.min(x, te.max_value(dtype))) def get_temp_dtype(h, w, dtype): temp_dtype = "int16" if h * w < 256 else "int32" if dtype in ("uint8", "int8"): return temp_dtype else: raise RuntimeError(f"Unsupported output dtype, {odtype}'") def qnn_avg_pool2d_NCHW( data: te.Tensor, kernel: list, stride: list, padding: list, dilation: list, count_include_pad: bool, oshape: list, odtype: str, # quantization params: input_scale: float, input_zero_point: int, output_scale: float, output_zero_point: int, ): """Compute for quantized avg_pool2d""" kh, kw = kernel rh = te.reduce_axis((0, kh), name="rh") rw = te.reduce_axis((0, kw), name="rw") temp_dtype = get_temp_dtype(kh, kw, odtype) sh, sw = stride dh, dw = dilation scale = input_scale / output_scale scale_fixed_point, rsh = get_fixed_point_value(scale, "int16") corr = (output_zero_point << rsh) - input_zero_point * scale_fixed_point dilated_kh = (kh - 1) * dh + 1 dilated_kw = (kw - 1) * dw + 1 pad_top, pad_left, pad_down, pad_right = get_pad_tuple( get_const_tuple(padding), (dilated_kh, dilated_kw) ) # DOPAD if pad_top != 0 or pad_down != 0 or pad_left != 0 or pad_right != 0: pad_before = (0, 0, pad_top, pad_left) pad_after = (0, 0, pad_down, pad_right) data_pad = pad(data, pad_before, pad_after, pad_value=input_zero_point, name="data_pad") else: # By definition when True, zero-padding will be included in the averaging calculation # This is equivalent to PoolArea = (kh * kw) count_include_pad = True data_pad = data Sum = te.compute( oshape, lambda b, c, h, w: te.sum( data_pad[b, c, h * sh + dh * rh, w * sw + dw * rw].astype(temp_dtype), axis=[rh, rw] ), name="pool_sum", ) if not count_include_pad: # Compute PoolArea using unpadded input tensor _, _, oh, ow = oshape _, _, ih, iw = data.shape PoolArea = te.compute( (oh, ow), lambda i, j: compute_PoolArea(i, j, ih, iw, kh, kw, sh, sw, dh, dw, pad_top, pad_left), name="pool_area", ) ScaleWithArea = te.compute( (oh, ow), lambda i, j: (scale_fixed_point // PoolArea[i, j]).astype("int32"), name="scale_with_area", ) Avg = te.compute( oshape, lambda b, c, h, w: saturate( ((Sum[b, c, h, w] * ScaleWithArea[h, w]) + corr + (1 << (rsh - 1))) >> rsh, odtype ).astype(odtype), name="pool_avg", ) else: ScaleWithArea = scale_fixed_point // (kh * kw) Avg = te.compute( oshape, lambda b, c, h, w: saturate( ((Sum[b, c, h, w] * ScaleWithArea) + corr + (1 << (rsh - 1))) >> rsh, odtype ).astype(odtype), name="pool_avg", ) return Avg def qnn_avg_pool2d_NHWC( data: te.Tensor, kernel: list, stride: list, 
padding: list, dilation: list, count_include_pad: bool, oshape: list, odtype: str, # quantization params: input_scale: float, input_zero_point: int, output_scale: float, output_zero_point: int, ): """Compute for quantized avg_pool2d""" kh, kw = kernel rh = te.reduce_axis((0, kh), name="rh") rw = te.reduce_axis((0, kw), name="rw") temp_dtype = get_temp_dtype(kh, kw, odtype) sh, sw = stride dh, dw = dilation scale = input_scale / output_scale scale_fixed_point, rsh = get_fixed_point_value(scale, "int16") corr = (output_zero_point << rsh) - input_zero_point * scale_fixed_point dilated_kh = (kh - 1) * dh + 1 dilated_kw = (kw - 1) * dw + 1 # Compute Area pad_top, pad_left, pad_down, pad_right = get_pad_tuple( get_const_tuple(padding), (dilated_kh, dilated_kw) ) # DOPAD if pad_top != 0 or pad_down != 0 or pad_left != 0 or pad_right != 0: pad_before = (0, pad_top, pad_left, 0) pad_after = (0, pad_down, pad_right, 0) data_pad = pad(data, pad_before, pad_after, pad_value=input_zero_point, name="data_pad") else: # By definition when True, zero-padding will be included in the averaging calculation # This is equivalent to PoolArea = (kh * kw) count_include_pad = True data_pad = data Sum = te.compute( oshape, lambda b, h, w, c: te.sum( data_pad[b, h * sh + dh * rh, w * sw + dw * rw, c].astype(temp_dtype), axis=[rh, rw] ), name="pool_sum", ) if not count_include_pad: # Compute PoolArea using unpadded input tensor _, oh, ow, _ = oshape _, ih, iw, _ = data.shape PoolArea = te.compute( (oh, ow), lambda i, j: compute_PoolArea(i, j, ih, iw, kh, kw, sh, sw, dh, dw, pad_top, pad_left), name="pool_area", ) ScaleWithArea = te.compute( (oh, ow), lambda i, j: tir.if_then_else( tir.all(PoolArea[i, j] > 0), (scale_fixed_point // PoolArea[i, j]).astype("int32"), 0, ), name="scale_with_area", ) Avg = te.compute( oshape, lambda b, h, w, c: saturate( ((Sum[b, h, w, c] * ScaleWithArea[h, w]) + corr + (1 << (rsh - 1))) >> rsh, odtype ).astype(odtype), name="pool_avg", ) else: ScaleWithArea = scale_fixed_point // (kh * kw) Avg = te.compute( oshape, lambda b, h, w, c: saturate( ((Sum[b, h, w, c] * ScaleWithArea) + corr + (1 << (rsh - 1))) >> rsh, odtype ).astype(odtype), name="pool_avg", ) return Avg def qnn_avg_pool2d_wrapper_compute_NCHW( data: te.Tensor, kernel: list, stride: list, padding: list, dilation: list, count_include_pad: bool, oshape: list, odtype: str, # quantization params: input_scale: float, input_zero_point: int, output_scale: float, output_zero_point: int, ): """Extract qnn params""" if ( is_scalar(input_scale) and is_scalar(output_scale) and is_scalar(input_zero_point) and is_scalar(output_zero_point) ):
iscale = get_const_float_value(input_scale)
4
2023-12-14 02:37:47+00:00
12k
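Tying the pieces of this row together, the sketch below replays the integer arithmetic of qnn_avg_pool2d for one corner output pixel. Every quantization parameter and pixel value is invented for illustration; PoolArea = 4 corresponds to the corner-window case drawn in the compute_PoolArea docstring (3x3 kernel, padding [1, 1, 1, 1], only four pixels inside the data).

# Assumed quantization params: equal input/output scale and zero point, so
# scale = 1.0, which get_fixed_point_value maps to (16384, rsh=14).
scale_fixed_point, rsh = 16384, 14
input_zp = output_zp = 128
corr = (output_zp << rsh) - input_zp * scale_fixed_point    # 0 for these params

window = [100, 120, 140, 160]                    # the four in-bounds uint8 inputs
pool_area = 4
Sum = sum(window)                                # 520
ScaleWithArea = scale_fixed_point // pool_area   # 4096

avg = (Sum * ScaleWithArea + corr + (1 << (rsh - 1))) >> rsh
print(avg)   # 130 == round(520 / 4); saturate() would then clamp it to the uint8 range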
yolain/ComfyUI-Easy-Use
py/easyNodes.py
[ { "identifier": "advanced_encode", "path": "py/adv_encode.py", "snippet": "def advanced_encode(clip, text, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5,\n apply_to_pooled=True):\n tokenized = clip.tokenize(text, return_word_ids=True)\n if isinstance(clip.cond_stage_model, (SDXLClipModel, SDXLRefinerClipModel, SDXLClipG)):\n embs_l = None\n embs_g = None\n pooled = None\n if 'l' in tokenized and isinstance(clip.cond_stage_model, SDXLClipModel):\n embs_l, _ = advanced_encode_from_tokens(tokenized['l'],\n token_normalization,\n weight_interpretation,\n lambda x: encode_token_weights(clip, x, encode_token_weights_l),\n w_max=w_max,\n return_pooled=False)\n if 'g' in tokenized:\n embs_g, pooled = advanced_encode_from_tokens(tokenized['g'],\n token_normalization,\n weight_interpretation,\n lambda x: encode_token_weights(clip, x,\n encode_token_weights_g),\n w_max=w_max,\n return_pooled=True,\n apply_to_pooled=apply_to_pooled)\n return prepareXL(embs_l, embs_g, pooled, clip_balance)\n else:\n return advanced_encode_from_tokens(tokenized['l'],\n token_normalization,\n weight_interpretation,\n lambda x: (clip.encode_from_tokens({'l': x}), None),\n w_max=w_max)" }, { "identifier": "advanced_encode_XL", "path": "py/adv_encode.py", "snippet": "def advanced_encode_XL(clip, text1, text2, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5,\n apply_to_pooled=True):\n tokenized1 = clip.tokenize(text1, return_word_ids=True)\n tokenized2 = clip.tokenize(text2, return_word_ids=True)\n\n embs_l, _ = advanced_encode_from_tokens(tokenized1['l'],\n token_normalization,\n weight_interpretation,\n lambda x: encode_token_weights(clip, x, encode_token_weights_l),\n w_max=w_max,\n return_pooled=False)\n\n embs_g, pooled = advanced_encode_from_tokens(tokenized2['g'],\n token_normalization,\n weight_interpretation,\n lambda x: encode_token_weights(clip, x, encode_token_weights_g),\n w_max=w_max,\n return_pooled=True,\n apply_to_pooled=apply_to_pooled)\n\n gcd_num = gcd(embs_l.shape[1], embs_g.shape[1])\n repeat_l = int((embs_g.shape[1] / gcd_num) * embs_l.shape[1])\n repeat_g = int((embs_l.shape[1] / gcd_num) * embs_g.shape[1])\n\n return prepareXL(embs_l.expand((-1, repeat_l, -1)), embs_g.expand((-1, repeat_g, -1)), pooled, clip_balance)" }, { "identifier": "BASE_RESOLUTIONS", "path": "py/config.py", "snippet": "BASE_RESOLUTIONS = [\n (\"自定义\", \"自定义\"),\n (512, 512),\n (512, 768),\n (768, 512),\n (576, 1024),\n (768, 1024),\n (768, 1280),\n (768, 1344),\n (768, 1536),\n (816, 1920),\n (832, 1152),\n (896, 1152),\n (896, 1088),\n (1024, 1024),\n (1024, 576),\n (1024, 768),\n (1080, 1920),\n (1440, 2560),\n (1088, 896),\n (1152, 832),\n (1152, 896),\n (1280, 768),\n (1344, 768),\n (1536, 640),\n (1536, 768),\n (1920, 816),\n (1920, 1080),\n (2560, 1440),\n]" }, { "identifier": "log_node_info", "path": "py/log.py", "snippet": "def log_node_info(node_name, message=None):\n \"\"\"Logs an info message.\"\"\"\n _log_node(COLORS_FG[\"CYAN\"], node_name, message)" }, { "identifier": "log_node_error", "path": "py/log.py", "snippet": "def log_node_error(node_name, message=None):\n \"\"\"Logs an warn message.\"\"\"\n _log_node(COLORS_FG[\"RED\"], node_name, message)" }, { "identifier": "log_node_warn", "path": "py/log.py", "snippet": "def log_node_warn(node_name, message=None):\n \"\"\"Logs an warn message.\"\"\"\n _log_node(COLORS_FG[\"YELLOW\"], node_name, message)" }, { "identifier": "log_node_success", "path": "py/log.py", "snippet": "def log_node_success(node_name, 
message=None):\n \"\"\"Logs a success message.\"\"\"\n _log_node(COLORS_FG[\"GREEN\"], node_name, message)" }, { "identifier": "process_with_loras", "path": "py/wildcards.py", "snippet": "def process_with_loras(wildcard_opt, model, clip, title=\"Positive\", seed=None, can_load_lora=True, pipe_lora_stack=[]):\n lora_name_cache = []\n\n pass1 = process(wildcard_opt, seed)\n loras = extract_lora_values(pass1)\n pass2 = remove_lora_tags(pass1)\n\n has_noodle_key = True if \"__\" in wildcard_opt else False\n has_loras = True if loras != [] else False\n show_wildcard_prompt = True if has_noodle_key or has_loras else False\n\n for lora_name, model_weight, clip_weight, lbw, lbw_a, lbw_b in loras:\n if (lora_name.split('.')[-1]) not in folder_paths.supported_pt_extensions:\n lora_name = lora_name+\".safetensors\"\n\n lora_name = resolve_lora_name(lora_name_cache, lora_name)\n\n path = folder_paths.get_full_path(\"loras\", lora_name)\n\n if path is not None:\n print(f\"LORA: {lora_name}: {model_weight}, {clip_weight}, LBW={lbw}, A={lbw_a}, B={lbw_b}\")\n\n def default_lora():\n return nodes.LoraLoader().load_lora(model, clip, lora_name, model_weight, clip_weight)\n\n if lbw is not None:\n cls = nodes.NODE_CLASS_MAPPINGS['LoraLoaderBlockWeight //Inspire']\n if can_load_lora:\n model, clip, _ = cls().doit(model, clip, lora_name, model_weight, clip_weight, False, 0, lbw_a, lbw_b, \"\", lbw)\n pipe_lora_stack.append({\n \"lora_name\": lora_name, \"model\": model, \"clip\": clip, \"lora_model_strength\": model_weight,\n \"lora_clip_strength\": clip_weight,\n \"lbw_a\": lbw_a,\n \"lbw_b\": lbw_b,\n \"lbw\": lbw\n })\n else:\n pipe_lora_stack.append({\"lora_name\": lora_name, \"model\": model, \"clip\": clip, \"lora_model_strength\": model_weight, \"lora_clip_strength\": clip_weight})\n if can_load_lora:\n model, clip = default_lora()\n else:\n print(f\"LORA NOT FOUND: {lora_name}\")\n\n # print(f\"{title}: {pass2}\")\n # print(f'{title}_decode:', pass1)\n\n return model, clip, pass2, pass1, show_wildcard_prompt, pipe_lora_stack" }, { "identifier": "get_wildcard_list", "path": "py/wildcards.py", "snippet": "def get_wildcard_list():\n return [f\"__{x}__\" for x in easy_wildcard_dict.keys()]" }, { "identifier": "sample_dpmpp_2s_ancestral", "path": "py/gradual_latent_hires_fix.py", "snippet": "@torch.no_grad()\ndef sample_dpmpp_2s_ancestral(\n model,\n x,\n sigmas,\n extra_args=None,\n callback=None,\n disable=None,\n eta=1.0,\n s_noise=1.0,\n noise_sampler=None,\n upscale_ratio=2.0,\n start_step=5,\n end_step=15,\n upscale_n_step=3,\n unsharp_kernel_size=3,\n unsharp_sigma=0.5,\n unsharp_strength=0.0,\n):\n \"\"\"Ancestral sampling with DPM-Solver++(2S) second-order steps.\"\"\"\n extra_args = {} if extra_args is None else extra_args\n s_in = x.new_ones([x.shape[0]])\n sigma_fn = lambda t: t.neg().exp()\n t_fn = lambda sigma: sigma.log().neg()\n\n # make upscale info\n upscale_steps = []\n step = start_step - 1\n while step < end_step - 1:\n upscale_steps.append(step)\n step += upscale_n_step\n height, width = x.shape[2:]\n upscale_shapes = [\n (int(height * (((upscale_ratio - 1) / i) + 1)), int(width * (((upscale_ratio - 1) / i) + 1)))\n for i in reversed(range(1, len(upscale_steps) + 1))\n ]\n upscale_info = {k: v for k, v in zip(upscale_steps, upscale_shapes)}\n\n for i in trange(len(sigmas) - 1, disable=disable):\n denoised = model(x, sigmas[i] * s_in, **extra_args)\n sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)\n if callback is not None:\n callback({\"x\": x, \"i\": i, 
\"sigma\": sigmas[i], \"sigma_hat\": sigmas[i], \"denoised\": denoised})\n if sigma_down == 0:\n # Euler method\n d = to_d(x, sigmas[i], denoised)\n dt = sigma_down - sigmas[i]\n x = x + d * dt\n else:\n # DPM-Solver++(2S)\n t, t_next = t_fn(sigmas[i]), t_fn(sigma_down)\n r = 1 / 2\n h = t_next - t\n s = t + r * h\n x_2 = (sigma_fn(s) / sigma_fn(t)) * x - (-h * r).expm1() * denoised\n denoised_2 = model(x_2, sigma_fn(s) * s_in, **extra_args)\n x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised_2\n # Noise addition\n if sigmas[i + 1] > 0:\n # Resize\n if i in upscale_info:\n x = torch.nn.functional.interpolate(x, size=upscale_info[i], mode=\"bicubic\", align_corners=False)\n if unsharp_strength > 0:\n blurred = gaussian_blur(x, kernel_size=unsharp_kernel_size, sigma=unsharp_sigma)\n x = x + unsharp_strength * (x - blurred)\n noise_sampler = default_noise_sampler(x)\n noise = noise_sampler(sigmas[i], sigmas[i + 1])\n x = x + noise * sigma_up * s_noise\n return x" }, { "identifier": "sample_dpmpp_2m_sde", "path": "py/gradual_latent_hires_fix.py", "snippet": "@torch.no_grad()\ndef sample_dpmpp_2m_sde(\n model,\n x,\n sigmas,\n extra_args=None,\n callback=None,\n disable=None,\n eta=1.0,\n s_noise=1.0,\n noise_sampler=None,\n solver_type=\"midpoint\",\n upscale_ratio=2.0,\n start_step=5,\n end_step=15,\n upscale_n_step=3,\n unsharp_kernel_size=3,\n unsharp_sigma=0.5,\n unsharp_strength=0.0,\n):\n \"\"\"DPM-Solver++(2M) SDE.\"\"\"\n\n if solver_type not in {\"heun\", \"midpoint\"}:\n raise ValueError(\"solver_type must be 'heun' or 'midpoint'\")\n\n seed = extra_args.get(\"seed\", None)\n sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()\n extra_args = {} if extra_args is None else extra_args\n s_in = x.new_ones([x.shape[0]])\n\n old_denoised = None\n h_last = None\n h = None\n\n # make upscale info\n upscale_steps = []\n step = start_step - 1\n while step < end_step - 1:\n upscale_steps.append(step)\n step += upscale_n_step\n height, width = x.shape[2:]\n upscale_shapes = [\n (int(height * (((upscale_ratio - 1) / i) + 1)), int(width * (((upscale_ratio - 1) / i) + 1)))\n for i in reversed(range(1, len(upscale_steps) + 1))\n ]\n upscale_info = {k: v for k, v in zip(upscale_steps, upscale_shapes)}\n\n for i in trange(len(sigmas) - 1, disable=disable):\n denoised = model(x, sigmas[i] * s_in, **extra_args)\n if callback is not None:\n callback({\"x\": x, \"i\": i, \"sigma\": sigmas[i], \"sigma_hat\": sigmas[i], \"denoised\": denoised})\n if sigmas[i + 1] == 0:\n # Denoising step\n x = denoised\n else:\n # DPM-Solver++(2M) SDE\n t, s = -sigmas[i].log(), -sigmas[i + 1].log()\n h = s - t\n eta_h = eta * h\n\n x = sigmas[i + 1] / sigmas[i] * (-eta_h).exp() * x + (-h - eta_h).expm1().neg() * denoised\n\n if old_denoised is not None:\n r = h_last / h\n if solver_type == \"heun\":\n x = x + ((-h - eta_h).expm1().neg() / (-h - eta_h) + 1) * (1 / r) * (denoised - old_denoised)\n elif solver_type == \"midpoint\":\n x = x + 0.5 * (-h - eta_h).expm1().neg() * (1 / r) * (denoised - old_denoised)\n\n if eta:\n # Resize\n if i in upscale_info:\n x = torch.nn.functional.interpolate(x, size=upscale_info[i], mode=\"bicubic\", align_corners=False)\n if unsharp_strength > 0:\n blurred = gaussian_blur(x, kernel_size=unsharp_kernel_size, sigma=unsharp_sigma)\n x = x + unsharp_strength * (x - blurred)\n denoised = None # 次ステップとサイズがあわないのでとりあえずNoneにしておく。\n noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True)\n x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * 
sigmas[i + 1] * (-2 * eta_h).expm1().neg().sqrt() * s_noise\n\n old_denoised = denoised\n h_last = h\n return x" }, { "identifier": "sample_lcm", "path": "py/gradual_latent_hires_fix.py", "snippet": "@torch.no_grad()\ndef sample_lcm(\n model,\n x,\n sigmas,\n extra_args=None,\n callback=None,\n disable=None,\n noise_sampler=None,\n eta=None,\n s_noise=None,\n upscale_ratio=2.0,\n start_step=5,\n end_step=15,\n upscale_n_step=3,\n unsharp_kernel_size=3,\n unsharp_sigma=0.5,\n unsharp_strength=0.0,\n):\n extra_args = {} if extra_args is None else extra_args\n s_in = x.new_ones([x.shape[0]])\n\n # make upscale info\n upscale_steps = []\n step = start_step - 1\n while step < end_step - 1:\n upscale_steps.append(step)\n step += upscale_n_step\n height, width = x.shape[2:]\n upscale_shapes = [\n (int(height * (((upscale_ratio - 1) / i) + 1)), int(width * (((upscale_ratio - 1) / i) + 1)))\n for i in reversed(range(1, len(upscale_steps) + 1))\n ]\n upscale_info = {k: v for k, v in zip(upscale_steps, upscale_shapes)}\n\n for i in trange(len(sigmas) - 1, disable=disable):\n denoised = model(x, sigmas[i] * s_in, **extra_args)\n if callback is not None:\n callback({\"x\": x, \"i\": i, \"sigma\": sigmas[i], \"sigma_hat\": sigmas[i], \"denoised\": denoised})\n\n x = denoised\n if sigmas[i + 1] > 0:\n # Resize\n if i in upscale_info:\n x = torch.nn.functional.interpolate(x, size=upscale_info[i], mode=\"bicubic\", align_corners=False)\n if unsharp_strength > 0:\n blurred = gaussian_blur(x, kernel_size=unsharp_kernel_size, sigma=unsharp_sigma)\n x = x + unsharp_strength * (x - blurred)\n noise_sampler = default_noise_sampler(x)\n x += sigmas[i + 1] * noise_sampler(sigmas[i], sigmas[i + 1])\n\n return x" }, { "identifier": "sample_euler_ancestral", "path": "py/gradual_latent_hires_fix.py", "snippet": "@torch.no_grad()\ndef sample_euler_ancestral(\n model,\n x,\n sigmas,\n extra_args=None,\n callback=None,\n disable=None,\n eta=1.0,\n s_noise=1.0,\n noise_sampler=None,\n upscale_ratio=2.0,\n start_step=5,\n end_step=15,\n upscale_n_step=3,\n unsharp_kernel_size=3,\n unsharp_sigma=0.5,\n unsharp_strength=0.0,\n):\n \"\"\"Ancestral sampling with Euler method steps.\"\"\"\n extra_args = {} if extra_args is None else extra_args\n noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler\n s_in = x.new_ones([x.shape[0]])\n\n # make upscale info\n upscale_steps = []\n step = start_step - 1\n while step < end_step - 1:\n upscale_steps.append(step)\n step += upscale_n_step\n height, width = x.shape[2:]\n upscale_shapes = [\n (int(height * (((upscale_ratio - 1) / i) + 1)), int(width * (((upscale_ratio - 1) / i) + 1)))\n for i in reversed(range(1, len(upscale_steps) + 1))\n ]\n upscale_info = {k: v for k, v in zip(upscale_steps, upscale_shapes)}\n\n for i in trange(len(sigmas) - 1, disable=disable):\n denoised = model(x, sigmas[i] * s_in, **extra_args)\n sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)\n if callback is not None:\n callback({\"x\": x, \"i\": i, \"sigma\": sigmas[i], \"sigma_hat\": sigmas[i], \"denoised\": denoised})\n d = to_d(x, sigmas[i], denoised)\n # Euler method\n dt = sigma_down - sigmas[i]\n x = x + d * dt\n if sigmas[i + 1] > 0:\n # Resize\n if i in upscale_info:\n x = torch.nn.functional.interpolate(x, size=upscale_info[i], mode=\"bicubic\", align_corners=False)\n if unsharp_strength > 0:\n blurred = gaussian_blur(x, kernel_size=unsharp_kernel_size, sigma=unsharp_sigma)\n x = x + unsharp_strength * (x - blurred)\n\n noise_sampler = 
default_noise_sampler(x)\n noise = noise_sampler(sigmas[i], sigmas[i + 1])\n x = x + noise * sigma_up * s_noise\n return x" }, { "identifier": "DynThresh", "path": "py/dynthres_core.py", "snippet": "class DynThresh:\n\n Modes = [\"Constant\", \"Linear Down\", \"Cosine Down\", \"Half Cosine Down\", \"Linear Up\", \"Cosine Up\", \"Half Cosine Up\", \"Power Up\", \"Power Down\", \"Linear Repeating\", \"Cosine Repeating\", \"Sawtooth\"]\n Startpoints = [\"MEAN\", \"ZERO\"]\n Variabilities = [\"AD\", \"STD\"]\n\n def __init__(self, mimic_scale, threshold_percentile, mimic_mode, mimic_scale_min, cfg_mode, cfg_scale_min, sched_val, experiment_mode, max_steps, separate_feature_channels, scaling_startpoint, variability_measure, interpolate_phi):\n self.mimic_scale = mimic_scale\n self.threshold_percentile = threshold_percentile\n self.mimic_mode = mimic_mode\n self.cfg_mode = cfg_mode\n self.max_steps = max_steps\n self.cfg_scale_min = cfg_scale_min\n self.mimic_scale_min = mimic_scale_min\n self.experiment_mode = experiment_mode\n self.sched_val = sched_val\n self.sep_feat_channels = separate_feature_channels\n self.scaling_startpoint = scaling_startpoint\n self.variability_measure = variability_measure\n self.interpolate_phi = interpolate_phi\n\n def interpret_scale(self, scale, mode, min):\n scale -= min\n max = self.max_steps - 1\n frac = self.step / max\n if mode == \"Constant\":\n pass\n elif mode == \"Linear Down\":\n scale *= 1.0 - frac\n elif mode == \"Half Cosine Down\":\n scale *= math.cos(frac)\n elif mode == \"Cosine Down\":\n scale *= math.cos(frac * 1.5707)\n elif mode == \"Linear Up\":\n scale *= frac\n elif mode == \"Half Cosine Up\":\n scale *= 1.0 - math.cos(frac)\n elif mode == \"Cosine Up\":\n scale *= 1.0 - math.cos(frac * 1.5707)\n elif mode == \"Power Up\":\n scale *= math.pow(frac, self.sched_val)\n elif mode == \"Power Down\":\n scale *= 1.0 - math.pow(frac, self.sched_val)\n elif mode == \"Linear Repeating\":\n portion = (frac * self.sched_val) % 1.0\n scale *= (0.5 - portion) * 2 if portion < 0.5 else (portion - 0.5) * 2\n elif mode == \"Cosine Repeating\":\n scale *= math.cos(frac * 6.28318 * self.sched_val) * 0.5 + 0.5\n elif mode == \"Sawtooth\":\n scale *= (frac * self.sched_val) % 1.0\n scale += min\n return scale\n\n def dynthresh(self, cond, uncond, cfg_scale, weights):\n mimic_scale = self.interpret_scale(self.mimic_scale, self.mimic_mode, self.mimic_scale_min)\n cfg_scale = self.interpret_scale(cfg_scale, self.cfg_mode, self.cfg_scale_min)\n # uncond shape is (batch, 4, height, width)\n conds_per_batch = cond.shape[0] / uncond.shape[0]\n assert conds_per_batch == int(conds_per_batch), \"Expected # of conds per batch to be constant across batches\"\n cond_stacked = cond.reshape((-1, int(conds_per_batch)) + uncond.shape[1:])\n\n ### Normal first part of the CFG Scale logic, basically\n diff = cond_stacked - uncond.unsqueeze(1)\n if weights is not None:\n diff = diff * weights\n relative = diff.sum(1)\n\n ### Get the normal result for both mimic and normal scale\n mim_target = uncond + relative * mimic_scale\n cfg_target = uncond + relative * cfg_scale\n ### If we weren't doing mimic scale, we'd just return cfg_target here\n\n ### Now recenter the values relative to their average rather than absolute, to allow scaling from average\n mim_flattened = mim_target.flatten(2)\n cfg_flattened = cfg_target.flatten(2)\n mim_means = mim_flattened.mean(dim=2).unsqueeze(2)\n cfg_means = cfg_flattened.mean(dim=2).unsqueeze(2)\n mim_centered = mim_flattened - mim_means\n 
cfg_centered = cfg_flattened - cfg_means\n\n if self.sep_feat_channels:\n if self.variability_measure == 'STD':\n mim_scaleref = mim_centered.std(dim=2).unsqueeze(2)\n cfg_scaleref = cfg_centered.std(dim=2).unsqueeze(2)\n else: # 'AD'\n mim_scaleref = mim_centered.abs().max(dim=2).values.unsqueeze(2)\n cfg_scaleref = torch.quantile(cfg_centered.abs(), self.threshold_percentile, dim=2).unsqueeze(2)\n\n else:\n if self.variability_measure == 'STD':\n mim_scaleref = mim_centered.std()\n cfg_scaleref = cfg_centered.std()\n else: # 'AD'\n mim_scaleref = mim_centered.abs().max()\n cfg_scaleref = torch.quantile(cfg_centered.abs(), self.threshold_percentile)\n\n if self.scaling_startpoint == 'ZERO':\n scaling_factor = mim_scaleref / cfg_scaleref\n result = cfg_flattened * scaling_factor\n\n else: # 'MEAN'\n if self.variability_measure == 'STD':\n cfg_renormalized = (cfg_centered / cfg_scaleref) * mim_scaleref\n else: # 'AD'\n ### Get the maximum value of all datapoints (with an optional threshold percentile on the uncond)\n max_scaleref = torch.maximum(mim_scaleref, cfg_scaleref)\n ### Clamp to the max\n cfg_clamped = cfg_centered.clamp(-max_scaleref, max_scaleref)\n ### Now shrink from the max to normalize and grow to the mimic scale (instead of the CFG scale)\n cfg_renormalized = (cfg_clamped / max_scaleref) * mim_scaleref\n\n ### Now add it back onto the averages to get into real scale again and return\n result = cfg_renormalized + cfg_means\n\n actual_res = result.unflatten(2, mim_target.shape[2:])\n\n if self.interpolate_phi != 1.0:\n actual_res = actual_res * self.interpolate_phi + cfg_target * (1.0 - self.interpolate_phi)\n\n if self.experiment_mode == 1:\n num = actual_res.cpu().numpy()\n for y in range(0, 64):\n for x in range (0, 64):\n if num[0][0][y][x] > 1.0:\n num[0][1][y][x] *= 0.5\n if num[0][1][y][x] > 1.0:\n num[0][1][y][x] *= 0.5\n if num[0][2][y][x] > 1.5:\n num[0][2][y][x] *= 0.5\n actual_res = torch.from_numpy(num).to(device=uncond.device)\n elif self.experiment_mode == 2:\n num = actual_res.cpu().numpy()\n for y in range(0, 64):\n for x in range (0, 64):\n over_scale = False\n for z in range(0, 4):\n if abs(num[0][z][y][x]) > 1.5:\n over_scale = True\n if over_scale:\n for z in range(0, 4):\n num[0][z][y][x] *= 0.7\n actual_res = torch.from_numpy(num).to(device=uncond.device)\n elif self.experiment_mode == 3:\n coefs = torch.tensor([\n # R G B W\n [0.298, 0.207, 0.208, 0.0], # L1\n [0.187, 0.286, 0.173, 0.0], # L2\n [-0.158, 0.189, 0.264, 0.0], # L3\n [-0.184, -0.271, -0.473, 1.0], # L4\n ], device=uncond.device)\n res_rgb = torch.einsum(\"laxy,ab -> lbxy\", actual_res, coefs)\n max_r, max_g, max_b, max_w = res_rgb[0][0].max(), res_rgb[0][1].max(), res_rgb[0][2].max(), res_rgb[0][3].max()\n max_rgb = max(max_r, max_g, max_b)\n print(f\"test max = r={max_r}, g={max_g}, b={max_b}, w={max_w}, rgb={max_rgb}\")\n if self.step / (self.max_steps - 1) > 0.2:\n if max_rgb < 2.0 and max_w < 3.0:\n res_rgb /= max_rgb / 2.4\n else:\n if max_rgb > 2.4 and max_w > 3.0:\n res_rgb /= max_rgb / 2.4\n actual_res = torch.einsum(\"laxy,ab -> lbxy\", res_rgb, coefs.inverse())\n\n return actual_res" } ]
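Each gradual_latent_hires_fix sampler in the context above precomputes the same resize schedule before its denoising loop. Run stand-alone with the defaults used by this row's sdTurboSettings node (upscale_ratio=2.0, start_step=5, end_step=15, upscale_n_step=3) and an assumed 64x64 latent, that computation yields the dictionary printed below, i.e. the latent is progressively resized up to 128x128 by step 13.

upscale_ratio, start_step, end_step, upscale_n_step = 2.0, 5, 15, 3
height = width = 64                      # assumed latent size

upscale_steps = []
step = start_step - 1
while step < end_step - 1:
    upscale_steps.append(step)
    step += upscale_n_step

upscale_shapes = [
    (int(height * (((upscale_ratio - 1) / i) + 1)), int(width * (((upscale_ratio - 1) / i) + 1)))
    for i in reversed(range(1, len(upscale_steps) + 1))
]
upscale_info = dict(zip(upscale_steps, upscale_shapes))
print(upscale_info)   # {4: (80, 80), 7: (85, 85), 10: (96, 96), 13: (128, 128)}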
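The DynThresh.dynthresh snippet above recenters both the CFG target and the mimic target around their means, clamps the CFG deviations to the larger of the two scale references, and rescales them to the mimic spread before adding the mean back. The toy tensors below walk through just that core step; all values are invented, and the MEAN startpoint / 'AD' variability / no per-channel scaling branch is assumed.

import torch

uncond = torch.zeros(1, 1, 2, 2)
relative = torch.tensor([[[[4.0, -3.0], [0.5, -0.5]]]])
cfg_scale, mimic_scale, threshold_percentile = 8.0, 3.0, 1.0

cfg_target = uncond + relative * cfg_scale     # what plain CFG would return
mim_target = uncond + relative * mimic_scale   # same direction, milder scale

cfg_flat, mim_flat = cfg_target.flatten(2), mim_target.flatten(2)
cfg_means, mim_means = cfg_flat.mean(2, keepdim=True), mim_flat.mean(2, keepdim=True)
cfg_centered, mim_centered = cfg_flat - cfg_means, mim_flat - mim_means

mim_ref = mim_centered.abs().max()                                   # mimic spread
cfg_ref = torch.quantile(cfg_centered.abs(), threshold_percentile)   # CFG spread
max_ref = torch.maximum(mim_ref, cfg_ref)

# Clamp the CFG deviations to the larger spread, shrink them to the mimic spread,
# then restore the CFG mean.
result = (cfg_centered.clamp(-max_ref, max_ref) / max_ref) * mim_ref + cfg_means
print(result.unflatten(2, cfg_target.shape[2:]))   # [[[[13.25, -7.75], [2.75, -0.25]]]]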
import sys import os import re import json import time import math import torch import psutil import random import datetime import comfy.sd import comfy.utils import numpy as np import folder_paths import comfy.samplers import comfy.controlnet import latent_preview import comfy.model_base import comfy.model_management from pathlib import Path from comfy.sd import CLIP, VAE from comfy.cli_args import args from urllib.request import urlopen from collections import defaultdict from PIL.PngImagePlugin import PngInfo from PIL import Image, ImageDraw, ImageFont from comfy.model_patcher import ModelPatcher from comfy_extras.chainner_models import model_loading from typing import Dict, List, Optional, Tuple, Union, Any from .adv_encode import advanced_encode, advanced_encode_XL from server import PromptServer from nodes import VAELoader, MAX_RESOLUTION, RepeatLatentBatch, NODE_CLASS_MAPPINGS as ALL_NODE_CLASS_MAPPINGS, ConditioningSetMask from comfy_extras.nodes_mask import LatentCompositeMasked from .config import BASE_RESOLUTIONS from .log import log_node_info, log_node_error, log_node_warn, log_node_success from .wildcards import process_with_loras, get_wildcard_list from comfy_extras.nodes_stable3d import camera_embeddings from .gradual_latent_hires_fix import sample_dpmpp_2s_ancestral, sample_dpmpp_2m_sde, sample_lcm, sample_euler_ancestral from .dynthres_core import DynThresh
10,499
"hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}, } RETURN_TYPES = ("PIPE_LINE", ) RETURN_NAMES = ("pipe",) OUTPUT_NODE = True FUNCTION = "settings" CATEGORY = "EasyUse/PreSampling" def settings(self, pipe, steps, cfg, sampler_name, scheduler, start_at_step, end_at_step, add_noise, seed_num, image_to_latent=None, latent=None, prompt=None, extra_pnginfo=None, my_unique_id=None): # if my_unique_id: # workflow = extra_pnginfo["workflow"] # node = next((x for x in workflow["nodes"] if str(x["id"]) == my_unique_id), None) # if node: # seed_num = prompt[my_unique_id]['inputs']['seed_num'] if 'seed_num' in prompt[my_unique_id][ # 'inputs'] else 0 # length = len(node["widgets_values"]) # node["widgets_values"][length - 2] = seed_num # 图生图转换 vae = pipe["vae"] batch_size = pipe["loader_settings"]["batch_size"] if "batch_size" in pipe["loader_settings"] else 1 if image_to_latent is not None: samples = {"samples": vae.encode(image_to_latent)} samples = RepeatLatentBatch().repeat(samples, batch_size)[0] images = image_to_latent elif latent is not None: samples = RepeatLatentBatch().repeat(latent, batch_size)[0] images = pipe["images"] else: samples = pipe["samples"] images = pipe["images"] new_pipe = { "model": pipe['model'], "positive": pipe['positive'], "negative": pipe['negative'], "vae": pipe['vae'], "clip": pipe['clip'], "samples": samples, "images": images, "seed": seed_num, "loader_settings": { **pipe["loader_settings"], "steps": steps, "cfg": cfg, "sampler_name": sampler_name, "scheduler": scheduler, "start_step": start_at_step, "last_step": end_at_step, "denoise": 1.0, "add_noise": add_noise } } del pipe return {"ui": {"value": [seed_num]}, "result": (new_pipe,)} # 预采样设置(SDTurbo) class sdTurboSettings: def __init__(self): pass @classmethod def INPUT_TYPES(cls): return {"required": { "pipe": ("PIPE_LINE",), "steps": ("INT", {"default": 1, "min": 1, "max": 10}), "cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}), "sampler_name": (comfy.samplers.SAMPLER_NAMES,), "eta": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01, "round": False}), "s_noise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01, "round": False}), "upscale_ratio": ("FLOAT", {"default": 2.0, "min": 0.0, "max": 16.0, "step": 0.01, "round": False}), "start_step": ("INT", {"default": 5, "min": 0, "max": 1000, "step": 1}), "end_step": ("INT", {"default": 15, "min": 0, "max": 1000, "step": 1}), "upscale_n_step": ("INT", {"default": 3, "min": 0, "max": 1000, "step": 1}), "unsharp_kernel_size": ("INT", {"default": 3, "min": 1, "max": 21, "step": 1}), "unsharp_sigma": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 10.0, "step": 0.01, "round": False}), "unsharp_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.01, "round": False}), "seed_num": ("INT", {"default": 0, "min": 0, "max": 1125899906842624}), }, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}, } RETURN_TYPES = ("PIPE_LINE",) RETURN_NAMES = ("pipe",) OUTPUT_NODE = True FUNCTION = "settings" CATEGORY = "EasyUse/PreSampling" def settings(self, pipe, steps, cfg, sampler_name, eta, s_noise, upscale_ratio, start_step, end_step, upscale_n_step, unsharp_kernel_size, unsharp_sigma, unsharp_strength, seed_num, prompt=None, extra_pnginfo=None, my_unique_id=None): model = pipe['model'] # sigma timesteps = torch.flip(torch.arange(1, 11) * 100 - 1, (0,))[:steps] sigmas = model.model.model_sampling.sigma(timesteps) sigmas = torch.cat([sigmas, 
sigmas.new_zeros([1])]) #sampler sample_function = None extra_options = { "eta": eta, "s_noise": s_noise, "upscale_ratio": upscale_ratio, "start_step": start_step, "end_step": end_step, "upscale_n_step": upscale_n_step, "unsharp_kernel_size": unsharp_kernel_size, "unsharp_sigma": unsharp_sigma, "unsharp_strength": unsharp_strength, } if sampler_name == "euler_ancestral": sample_function = sample_euler_ancestral elif sampler_name == "dpmpp_2s_ancestral":
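The sdTurboSettings snippet above derives its sigma schedule from the fixed timesteps 999, 899, ..., 99, truncated to the requested number of steps, before mapping them through model.model.model_sampling.sigma and appending a trailing zero sigma. For example, with steps = 4:

import torch

steps = 4
timesteps = torch.flip(torch.arange(1, 11) * 100 - 1, (0,))[:steps]
print(timesteps)   # tensor([999, 899, 799, 699])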
# 加载器 class easyLoader: def __init__(self): self.loaded_objects = { "ckpt": defaultdict(tuple), # {ckpt_name: (model, ...)} "clip": defaultdict(tuple), "clip_vision": defaultdict(tuple), "bvae": defaultdict(tuple), "vae": defaultdict(object), "lora": defaultdict(dict), # {lora_name: {UID: (model_lora, clip_lora)}} } self.memory_threshold = self.determine_memory_threshold(0.7) def clean_values(self, values: str): original_values = values.split("; ") cleaned_values = [] for value in original_values: cleaned_value = value.strip(';').strip() if cleaned_value == "": continue try: cleaned_value = int(cleaned_value) except ValueError: try: cleaned_value = float(cleaned_value) except ValueError: pass cleaned_values.append(cleaned_value) return cleaned_values def clear_unused_objects(self, desired_names: set, object_type: str): keys = set(self.loaded_objects[object_type].keys()) for key in keys - desired_names: del self.loaded_objects[object_type][key] def get_input_value(self, entry, key): val = entry["inputs"][key] return val if isinstance(val, str) else val[0] def process_pipe_loader(self, entry, desired_ckpt_names, desired_vae_names, desired_lora_names, desired_lora_settings, num_loras=3, suffix=""): for idx in range(1, num_loras + 1): lora_name_key = f"{suffix}lora{idx}_name" desired_lora_names.add(self.get_input_value(entry, lora_name_key)) setting = f'{self.get_input_value(entry, lora_name_key)};{entry["inputs"][f"{suffix}lora{idx}_model_strength"]};{entry["inputs"][f"{suffix}lora{idx}_clip_strength"]}' desired_lora_settings.add(setting) desired_ckpt_names.add(self.get_input_value(entry, f"{suffix}ckpt_name")) desired_vae_names.add(self.get_input_value(entry, f"{suffix}vae_name")) def update_loaded_objects(self, prompt): desired_ckpt_names = set() desired_vae_names = set() desired_lora_names = set() desired_lora_settings = set() for entry in prompt.values(): class_type = entry["class_type"] if class_type == "easy a1111Loader" or class_type == "easy comfyLoader": lora_name = self.get_input_value(entry, "lora_name") desired_lora_names.add(lora_name) setting = f'{lora_name};{entry["inputs"]["lora_model_strength"]};{entry["inputs"]["lora_clip_strength"]}' desired_lora_settings.add(setting) desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name")) desired_vae_names.add(self.get_input_value(entry, "vae_name")) elif class_type == "easy zero123Loader" or class_type == 'easy svdLoader': desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name")) desired_vae_names.add(self.get_input_value(entry, "vae_name")) elif class_type == "easy XYInputs: ModelMergeBlocks": desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name_1")) desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name_2")) vae_use = self.get_input_value(entry, "vae_use") if vae_use != 'Use Model 1' and vae_use != 'Use Model 2': desired_vae_names.add(vae_use) object_types = ["ckpt", "clip", "bvae", "vae", "lora"] for object_type in object_types: desired_names = desired_ckpt_names if object_type in ["ckpt", "clip", "bvae"] else desired_vae_names if object_type == "vae" else desired_lora_names self.clear_unused_objects(desired_names, object_type) def add_to_cache(self, obj_type, key, value): """ Add an item to the cache with the current timestamp. """ timestamped_value = (value, time.time()) self.loaded_objects[obj_type][key] = timestamped_value def determine_memory_threshold(self, percentage=0.8): """ Determines the memory threshold as a percentage of the total available memory. 
Args: - percentage (float): The fraction of total memory to use as the threshold. Should be a value between 0 and 1. Default is 0.8 (80%). Returns: - memory_threshold (int): Memory threshold in bytes. """ total_memory = psutil.virtual_memory().total memory_threshold = total_memory * percentage return memory_threshold def get_memory_usage(self): """ Returns the memory usage of the current process in bytes. """ process = psutil.Process(os.getpid()) return process.memory_info().rss def eviction_based_on_memory(self): """ Evicts objects from cache based on memory usage and priority. """ current_memory = self.get_memory_usage() if current_memory < self.memory_threshold: return eviction_order = ["vae", "lora", "bvae", "clip", "ckpt"] for obj_type in eviction_order: if current_memory < self.memory_threshold: break # Sort items based on age (using the timestamp) items = list(self.loaded_objects[obj_type].items()) items.sort(key=lambda x: x[1][1]) # Sorting by timestamp for item in items: if current_memory < self.memory_threshold: break del self.loaded_objects[obj_type][item[0]] current_memory = self.get_memory_usage() def load_checkpoint(self, ckpt_name, config_name=None, load_vision=False): cache_name = ckpt_name if config_name not in [None, "Default"]: cache_name = ckpt_name + "_" + config_name if cache_name in self.loaded_objects["ckpt"]: cache_out = self.loaded_objects["clip_vision"][cache_name][0] if load_vision else self.loaded_objects["clip"][cache_name][0] return self.loaded_objects["ckpt"][cache_name][0], cache_out, self.loaded_objects["bvae"][cache_name][0] ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name) output_clip = False if load_vision else True output_clipvision = True if load_vision else False if config_name not in [None, "Default"]: config_path = folder_paths.get_full_path("configs", config_name) loaded_ckpt = comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=output_clip, output_clipvision=output_clipvision, embedding_directory=folder_paths.get_folder_paths("embeddings")) else: loaded_ckpt = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=output_clip, output_clipvision=output_clipvision, embedding_directory=folder_paths.get_folder_paths("embeddings")) self.add_to_cache("ckpt", cache_name, loaded_ckpt[0]) self.add_to_cache("bvae", cache_name, loaded_ckpt[2]) if load_vision: out = loaded_ckpt[3] self.add_to_cache("clip_vision", cache_name, out) else: out = loaded_ckpt[1] self.add_to_cache("clip", cache_name, loaded_ckpt[1]) self.eviction_based_on_memory() return loaded_ckpt[0], out, loaded_ckpt[2] def load_vae(self, vae_name): if vae_name in self.loaded_objects["vae"]: return self.loaded_objects["vae"][vae_name][0] vae_path = folder_paths.get_full_path("vae", vae_name) sd = comfy.utils.load_torch_file(vae_path) loaded_vae = comfy.sd.VAE(sd=sd) self.add_to_cache("vae", vae_name, loaded_vae) self.eviction_based_on_memory() return loaded_vae def load_lora(self, lora_name, model, clip, strength_model, strength_clip): model_hash = str(model)[44:-1] clip_hash = str(clip)[25:-1] unique_id = f'{model_hash};{clip_hash};{lora_name};{strength_model};{strength_clip}' if unique_id in self.loaded_objects["lora"] and unique_id in self.loaded_objects["lora"][lora_name]: return self.loaded_objects["lora"][unique_id][0] lora_path = folder_paths.get_full_path("loras", lora_name) lora = comfy.utils.load_torch_file(lora_path, safe_load=True) model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, 
strength_clip) self.add_to_cache("lora", unique_id, (model_lora, clip_lora)) self.eviction_based_on_memory() return model_lora, clip_lora # 采样器 class easySampler: def __init__(self): self.last_helds: dict[str, list] = { "results": [], "pipe_line": [], } @staticmethod def tensor2pil(image: torch.Tensor) -> Image.Image: """Convert a torch tensor to a PIL image.""" return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) @staticmethod def pil2tensor(image: Image.Image) -> torch.Tensor: """Convert a PIL image to a torch tensor.""" return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0) @staticmethod def enforce_mul_of_64(d): d = int(d) if d <= 7: d = 8 leftover = d % 8 # 8 is the number of pixels per byte if leftover != 0: # if the number of pixels is not a multiple of 8 if (leftover < 4): # if the number of pixels is less than 4 d -= leftover # remove the leftover pixels else: # if the number of pixels is more than 4 d += 8 - leftover # add the leftover pixels return int(d) @staticmethod def safe_split(to_split: str, delimiter: str) -> List[str]: """Split the input string and return a list of non-empty parts.""" parts = to_split.split(delimiter) parts = [part for part in parts if part not in ('', ' ', ' ')] while len(parts) < 2: parts.append('None') return parts def common_ksampler(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, preview_latent=True, disable_pbar=False): device = comfy.model_management.get_torch_device() latent_image = latent["samples"] if disable_noise: noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu") else: batch_inds = latent["batch_index"] if "batch_index" in latent else None noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds) noise_mask = None if "noise_mask" in latent: noise_mask = latent["noise_mask"] preview_format = "JPEG" if preview_format not in ["JPEG", "PNG"]: preview_format = "JPEG" previewer = False if preview_latent: previewer = latent_preview.get_previewer(device, model.model.latent_format) pbar = comfy.utils.ProgressBar(steps) def callback(step, x0, x, total_steps): preview_bytes = None if previewer: preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0) pbar.update_absolute(step + 1, total_steps, preview_bytes) samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) out = latent.copy() out["samples"] = samples return out def custom_ksampler(self, model, seed, steps, cfg, _sampler, sigmas, positive, negative, latent, disable_noise=False, preview_latent=True, disable_pbar=False): device = comfy.model_management.get_torch_device() latent_image = latent["samples"] if disable_noise: noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu") else: batch_inds = latent["batch_index"] if "batch_index" in latent else None noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds) noise_mask = None if "noise_mask" in latent: noise_mask = latent["noise_mask"] preview_format = "JPEG" if preview_format not in ["JPEG", "PNG"]: preview_format = "JPEG" previewer = False if 
preview_latent: previewer = latent_preview.get_previewer(device, model.model.latent_format) pbar = comfy.utils.ProgressBar(steps) def callback(step, x0, x, total_steps): preview_bytes = None if previewer: preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0) pbar.update_absolute(step + 1, total_steps, preview_bytes) samples = comfy.sample.sample_custom(model, noise, cfg, _sampler, sigmas, positive, negative, latent_image, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) out = latent.copy() out["samples"] = samples return out def get_value_by_id(self, key: str, my_unique_id: Any) -> Optional[Any]: """Retrieve value by its associated ID.""" try: for value, id_ in self.last_helds[key]: if id_ == my_unique_id: return value except KeyError: return None def update_value_by_id(self, key: str, my_unique_id: Any, new_value: Any) -> Union[bool, None]: """Update the value associated with a given ID. Return True if updated, False if appended, None if key doesn't exist.""" try: for i, (value, id_) in enumerate(self.last_helds[key]): if id_ == my_unique_id: self.last_helds[key][i] = (new_value, id_) return True self.last_helds[key].append((new_value, my_unique_id)) return False except KeyError: return False def upscale(self, samples, upscale_method, scale_by, crop): s = samples.copy() width = self.enforce_mul_of_64(round(samples["samples"].shape[3] * scale_by)) height = self.enforce_mul_of_64(round(samples["samples"].shape[2] * scale_by)) if (width > MAX_RESOLUTION): width = MAX_RESOLUTION if (height > MAX_RESOLUTION): height = MAX_RESOLUTION s["samples"] = comfy.utils.common_upscale(samples["samples"], width, height, upscale_method, crop) return (s,) def handle_upscale(self, samples: dict, upscale_method: str, factor: float, crop: bool) -> dict: """Upscale the samples if the upscale_method is not set to 'None'.""" if upscale_method != "None": samples = self.upscale(samples, upscale_method, factor, crop)[0] return samples def init_state(self, my_unique_id: Any, key: str, default: Any) -> Any: """Initialize the state by either fetching the stored value or setting a default.""" value = self.get_value_by_id(key, my_unique_id) if value is not None: return value return default def get_output(self, pipe: dict,) -> Tuple: """Return a tuple of various elements fetched from the input pipe dictionary.""" return ( pipe, pipe.get("images"), pipe.get("model"), pipe.get("positive"), pipe.get("negative"), pipe.get("samples"), pipe.get("vae"), pipe.get("clip"), pipe.get("seed"), ) def get_output_sdxl(self, sdxl_pipe: dict) -> Tuple: """Return a tuple of various elements fetched from the input sdxl_pipe dictionary.""" return ( sdxl_pipe, sdxl_pipe.get("model"), sdxl_pipe.get("positive"), sdxl_pipe.get("negative"), sdxl_pipe.get("vae"), sdxl_pipe.get("refiner_model"), sdxl_pipe.get("refiner_positive"), sdxl_pipe.get("refiner_negative"), sdxl_pipe.get("refiner_vae"), sdxl_pipe.get("samples"), sdxl_pipe.get("clip"), sdxl_pipe.get("images"), sdxl_pipe.get("seed") ) # XY图表 class easyXYPlot: def __init__(self, xyPlotData, save_prefix, image_output, prompt, extra_pnginfo, my_unique_id): self.x_node_type, self.x_type = easySampler.safe_split(xyPlotData.get("x_axis"), ': ') self.y_node_type, self.y_type = easySampler.safe_split(xyPlotData.get("y_axis"), ': ') self.x_values = xyPlotData.get("x_vals") if self.x_type != "None" else [] self.y_values = xyPlotData.get("y_vals") if self.y_type != "None" else [] self.grid_spacing = xyPlotData.get("grid_spacing") self.latent_id = 0 
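# --- Illustrative sketch of the XY grid ordering used by easyXYPlot (standalone example,
# not the extension's own code) ------------------------------------------------------------
# Images are generated column by column (outer loop over x_values, inner loop over y_values
# in get_labels_and_sample further below) but pasted row by row, so the flat result list is
# reordered with index = col * num_rows + row (see rearrange_tensors below). Worked example
# for a 2-column x 3-row grid:
num_cols, num_rows = 2, 3
flat = ["x0y0", "x0y1", "x0y2", "x1y0", "x1y1", "x1y2"]               # generation order
row_major = [flat[col * num_rows + row]
             for row in range(num_rows) for col in range(num_cols)]
assert row_major == ["x0y0", "x1y0", "x0y1", "x1y1", "x0y2", "x1y2"]  # paste order
# -------------------------------------------------------------------------------------------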
self.output_individuals = xyPlotData.get("output_individuals") self.x_label, self.y_label = [], [] self.max_width, self.max_height = 0, 0 self.latents_plot = [] self.image_list = [] self.num_cols = len(self.x_values) if len(self.x_values) > 0 else 1 self.num_rows = len(self.y_values) if len(self.y_values) > 0 else 1 self.total = self.num_cols * self.num_rows self.num = 0 self.save_prefix = save_prefix self.image_output = image_output self.prompt = prompt self.extra_pnginfo = extra_pnginfo self.my_unique_id = my_unique_id # Helper Functions @staticmethod def define_variable(plot_image_vars, value_type, value, index): plot_image_vars[value_type] = value if value_type in ["seed", "Seeds++ Batch"]: value_label = f"{value}" else: value_label = f"{value_type}: {value}" if "ControlNet" in value_type: if "," in value: line = value.split(',') value_label = f"{value_type}: {line[2]}" if value_type in ["ModelMergeBlocks"]: if ":" in value: line = value.split(':') value_label = f"{line[0]}" elif len(value) > 16: value_label = f"ModelMergeBlocks {index + 1}" else: value_label = f"MMB: {value}" if value_type in ["Positive Prompt S/R"]: value_label = f"pos prompt {index + 1}" if index>0 else f"pos prompt" if value_type in ["Negative Prompt S/R"]: value_label = f"neg prompt {index + 1}" if index>0 else f"neg prompt" if value_type in ["steps", "cfg", "denoise", "clip_skip", "lora_model_strength", "lora_clip_strength"]: value_label = f"{value_type}: {value}" if value_type == "positive": value_label = f"pos prompt {index + 1}" elif value_type == "negative": value_label = f"neg prompt {index + 1}" return plot_image_vars, value_label @staticmethod def get_font(font_size): return ImageFont.truetype(str(Path(os.path.join(Path(__file__).parent.parent, 'resources/OpenSans-Medium.ttf'))), font_size) @staticmethod def update_label(label, value, num_items): if len(label) < num_items: return [*label, value] return label @staticmethod def rearrange_tensors(latent, num_cols, num_rows): new_latent = [] for i in range(num_rows): for j in range(num_cols): index = j * num_rows + i new_latent.append(latent[index]) return new_latent def calculate_background_dimensions(self): border_size = int((self.max_width // 8) * 1.5) if self.y_type != "None" or self.x_type != "None" else 0 bg_width = self.num_cols * (self.max_width + self.grid_spacing) - self.grid_spacing + border_size * ( self.y_type != "None") bg_height = self.num_rows * (self.max_height + self.grid_spacing) - self.grid_spacing + border_size * ( self.x_type != "None") x_offset_initial = border_size if self.y_type != "None" else 0 y_offset = border_size if self.x_type != "None" else 0 return bg_width, bg_height, x_offset_initial, y_offset def adjust_font_size(self, text, initial_font_size, label_width): font = self.get_font(initial_font_size) text_width, _ = font.getsize(text) scaling_factor = 0.9 if text_width > (label_width * scaling_factor): return int(initial_font_size * (label_width / text_width) * scaling_factor) else: return initial_font_size def create_label(self, img, text, initial_font_size, is_x_label=True, max_font_size=70, min_font_size=10): label_width = img.width if is_x_label else img.height # Adjust font size font_size = self.adjust_font_size(text, initial_font_size, label_width) font_size = min(max_font_size, font_size) # Ensure font isn't too large font_size = max(min_font_size, font_size) # Ensure font isn't too small label_height = int(font_size * 1.5) if is_x_label else font_size label_bg = Image.new('RGBA', (label_width, label_height), color=(255, 
255, 255, 0)) d = ImageDraw.Draw(label_bg) font = self.get_font(font_size) # Check if text will fit, if not insert ellipsis and reduce text if d.textsize(text, font=font)[0] > label_width: while d.textsize(text + '...', font=font)[0] > label_width and len(text) > 0: text = text[:-1] text = text + '...' # Compute text width and height for multi-line text text_lines = text.split('\n') text_widths, text_heights = zip(*[d.textsize(line, font=font) for line in text_lines]) max_text_width = max(text_widths) total_text_height = sum(text_heights) # Compute position for each line of text lines_positions = [] current_y = 0 for line, line_width, line_height in zip(text_lines, text_widths, text_heights): text_x = (label_width - line_width) // 2 text_y = current_y + (label_height - total_text_height) // 2 current_y += line_height lines_positions.append((line, (text_x, text_y))) # Draw each line of text for line, (text_x, text_y) in lines_positions: d.text((text_x, text_y), line, fill='black', font=font) return label_bg def sample_plot_image(self, plot_image_vars, samples, preview_latent, latents_plot, image_list, disable_noise, start_step, last_step, force_full_denoise, x_value=None, y_value=None): model, clip, vae, positive, negative, seed, steps, cfg = None, None, None, None, None, None, None, None sampler_name, scheduler, denoise = None, None, None # 高级用法 if plot_image_vars["x_node_type"] == "advanced" or plot_image_vars["y_node_type"] == "advanced": if self.x_type == "Seeds++ Batch" or self.y_type == "Seeds++ Batch": seed = int(x_value) if self.x_type == "Seeds++ Batch" else int(y_value) if self.x_type == "Steps" or self.y_type == "Steps": steps = int(x_value) if self.x_type == "Steps" else int(y_value) if self.x_type == "StartStep" or self.y_type == "StartStep": start_step = int(x_value) if self.x_type == "StartStep" else int(y_value) if self.x_type == "EndStep" or self.y_type == "EndStep": last_step = int(x_value) if self.x_type == "EndStep" else int(y_value) if self.x_type == "CFG Scale" or self.y_type == "CFG Scale": cfg = float(x_value) if self.x_type == "CFG Scale" else float(y_value) if self.x_type == "Sampler" or self.y_type == "Sampler" or self.y_type == "Sampler & Scheduler": sampler_name = float(x_value) if self.x_type == "Sampler" or self.x_type == "Sampler & Scheduler" else float(y_value) if self.x_type == "Scheduler" or self.y_type == "Scheduler" or self.y_type == "Sampler & Scheduler": scheduler = float(x_value) if self.x_type == "Scheduler" or self.x_type == "Sampler & Scheduler" else float(y_value) if self.x_type == "Denoise" or self.y_type == "Denoise": denoise = float(x_value) if self.x_type == "Denoise" else float(y_value) # 模型叠加 if self.x_type == "ModelMergeBlocks" or self.y_type == "ModelMergeBlocks": ckpt_name_1, ckpt_name_2 = plot_image_vars['models'] model1, clip1, vae1 = easyCache.load_checkpoint(ckpt_name_1) model2, clip2, vae2 = easyCache.load_checkpoint(ckpt_name_2) xy_values = x_value if self.x_type == "ModelMergeBlocks" else y_value if ":" in xy_values: xy_line = xy_values.split(':') xy_values = xy_line[1] xy_arrs = xy_values.split(',') # ModelMergeBlocks if len(xy_arrs) == 3: input, middle, out = xy_arrs kwargs = { "input": input, "middle": middle, "out": out } elif len(xy_arrs) == 30: kwargs = {} kwargs["time_embed."] = xy_arrs[0] kwargs["label_emb."] = xy_arrs[1] for i in range(12): kwargs["input_blocks.{}.".format(i)] = xy_arrs[2+i] for i in range(3): kwargs["middle_block.{}.".format(i)] = xy_arrs[14+i] for i in range(12): kwargs["output_blocks.{}.".format(i)] = 
xy_arrs[17+i] kwargs["out."] = xy_arrs[29] else: raise Exception("ModelMergeBlocks weight length error") default_ratio = next(iter(kwargs.values())) m = model1.clone() kp = model2.get_key_patches("diffusion_model.") for k in kp: ratio = float(default_ratio) k_unet = k[len("diffusion_model."):] last_arg_size = 0 for arg in kwargs: if k_unet.startswith(arg) and last_arg_size < len(arg): ratio = float(kwargs[arg]) last_arg_size = len(arg) m.add_patches({k: kp[k]}, 1.0 - ratio, ratio) vae_use = plot_image_vars['vae_use'] clip = clip2 if vae_use == 'Use Model 2' else clip1 if vae_use == 'Use Model 2': vae = vae2 elif vae_use == 'Use Model 1': vae = vae1 else: (vae,) = VAELoader().load_vae(vae_use) model = m # 如果存在lora_stack叠加lora optional_lora_stack = plot_image_vars['lora_stack'] if optional_lora_stack is not None and optional_lora_stack != []: for lora in optional_lora_stack: lora_name = lora["lora_name"] model = model if model is not None else lora["model"] clip = clip if clip is not None else lora["clip"] lora_model_strength = lora["lora_model_strength"] lora_clip_strength = lora["lora_clip_strength"] if "lbw" in lora: lbw = lora["lbw"] lbw_a = lora["lbw_a"] lbw_b = lora["lbw_b"] cls = ALL_NODE_CLASS_MAPPINGS['LoraLoaderBlockWeight //Inspire'] model, clip, _ = cls().doit(model, clip, lora_name, lora_model_strength, lora_clip_strength, False, 0, lbw_a, lbw_b, "", lbw) model, clip = easyCache.load_lora(lora_name, model, clip, lora_model_strength, lora_clip_strength) # 处理clip clip = clip.clone() if plot_image_vars['clip_skip'] != 0: clip.clip_layer(plot_image_vars['clip_skip']) # 提示词 if "Positive" in self.x_type or "Positive" in self.y_type: if self.x_type == 'Positive Prompt S/R' or self.y_type == 'Positive Prompt S/R': positive = x_value if self.x_type == "Positive Prompt S/R" else y_value if plot_image_vars['a1111_prompt_style']: if "smZ CLIPTextEncode" in ALL_NODE_CLASS_MAPPINGS: cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode'] steps = plot_image_vars['steps'] clip = clip if clip is not None else plot_image_vars["clip"] positive, = cls().encode(clip, positive, "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) else: raise Exception( f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'") else: clip = clip if clip is not None else plot_image_vars["clip"] positive, positive_pooled = advanced_encode(clip, positive, plot_image_vars['positive_token_normalization'], plot_image_vars[ 'positive_weight_interpretation'], w_max=1.0, apply_to_pooled="enable") positive = [[positive, {"pooled_output": positive_pooled}]] if "Negative" in self.x_type or "Negative" in self.y_type: if self.x_type == 'Negative Prompt S/R' or self.y_type == 'Negative Prompt S/R': negative = x_value if self.x_type == "Negative Prompt S/R" else y_value if plot_image_vars['a1111_prompt_style']: if "smZ CLIPTextEncode" in ALL_NODE_CLASS_MAPPINGS: cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode'] steps = plot_image_vars['steps'] clip = clip if clip is not None else plot_image_vars["clip"] negative, = cls().encode(clip, negative, "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) else: raise Exception( f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'") else: clip = clip if clip is not None else plot_image_vars["clip"] negative, negative_pooled = advanced_encode(clip, negative, plot_image_vars['negative_token_normalization'], plot_image_vars[ 'negative_weight_interpretation'], w_max=1.0, 
apply_to_pooled="enable") negative = [[negative, {"pooled_output": negative_pooled}]] # ControlNet if "ControlNet" in self.x_type or "ControlNet" in self.y_type: _pipe = { "model": model if model is not None else plot_image_vars["model"], "positive": positive if positive is not None else plot_image_vars["positive_cond"], "negative": negative if negative is not None else plot_image_vars["negative_cond"], "vae": vae if vae is not None else plot_image_vars['vae'], "clip": clip if clip is not None else plot_image_vars['clip'], "samples": None, "images": None, "loader_settings": {} } cnet = plot_image_vars["cnet"] if "cnet" in plot_image_vars else None if cnet: strength, start_percent, end_percent = x_value.split(',') if "ControlNet" in self.x_type else y_value.split(',') strength = float(strength) start_percent = float(start_percent) end_percent = float(end_percent) for index, item in enumerate(cnet): control_net_names = item[0] image = item[1] for idx, control_net_name in enumerate(control_net_names): # print(control_net_name) _pipe, = controlnetAdvanced().controlnetApply(_pipe, image, control_net_name, None, strength, start_percent, end_percent) positive = _pipe['positive'] negative = _pipe['negative'] del _pipe # 简单用法 if plot_image_vars["x_node_type"] == "loader" or plot_image_vars["y_node_type"] == "loader": model, clip, vae = easyCache.load_checkpoint(plot_image_vars['ckpt_name']) if plot_image_vars['lora_name'] != "None": model, clip = easyCache.load_lora(plot_image_vars['lora_name'], model, clip, plot_image_vars['lora_model_strength'], plot_image_vars['lora_clip_strength']) # Check for custom VAE if plot_image_vars['vae_name'] not in ["Baked-VAE", "Baked VAE"]: vae = easyCache.load_vae(plot_image_vars['vae_name']) # CLIP skip if not clip: raise Exception("No CLIP found") clip = clip.clone() clip.clip_layer(plot_image_vars['clip_skip']) if plot_image_vars['a1111_prompt_style']: if "smZ CLIPTextEncode" in ALL_NODE_CLASS_MAPPINGS: cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode'] steps = plot_image_vars['steps'] positive, = cls().encode(clip, plot_image_vars['positive'], "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) negative, = cls().encode(clip, plot_image_vars['negative'], "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) else: raise Exception(f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'") else: positive, positive_pooled = advanced_encode(clip, plot_image_vars['positive'], plot_image_vars['positive_token_normalization'], plot_image_vars['positive_weight_interpretation'], w_max=1.0, apply_to_pooled="enable") positive = [[positive, {"pooled_output": positive_pooled}]] negative, negative_pooled = advanced_encode(clip, plot_image_vars['negative'], plot_image_vars['negative_token_normalization'], plot_image_vars['negative_weight_interpretation'], w_max=1.0, apply_to_pooled="enable") negative = [[negative, {"pooled_output": negative_pooled}]] model = model if model is not None else plot_image_vars["model"] clip = clip if clip is not None else plot_image_vars["clip"] vae = vae if vae is not None else plot_image_vars["vae"] positive = positive if positive is not None else plot_image_vars["positive_cond"] negative = negative if negative is not None else plot_image_vars["negative_cond"] seed = seed if seed is not None else plot_image_vars["seed"] steps = steps if steps is not None else plot_image_vars["steps"] cfg = cfg if cfg is not None else plot_image_vars["cfg"] sampler_name = 
sampler_name if sampler_name is not None else plot_image_vars["sampler_name"] scheduler = scheduler if scheduler is not None else plot_image_vars["scheduler"] denoise = denoise if denoise is not None else plot_image_vars["denoise"] # Sample samples = sampler.common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, samples, denoise=denoise, disable_noise=disable_noise, preview_latent=preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise) # Decode images and store latent = samples["samples"] # Add the latent tensor to the tensors list latents_plot.append(latent) # Decode the image image = vae.decode(latent).cpu() if self.output_individuals in [True, "True"]: easy_save = easySave(self.my_unique_id, self.prompt, self.extra_pnginfo) easy_save.images(image, self.save_prefix, self.image_output, group_id=self.num) # Convert the image from tensor to PIL Image and add it to the list pil_image = easySampler.tensor2pil(image) image_list.append(pil_image) # Update max dimensions self.max_width = max(self.max_width, pil_image.width) self.max_height = max(self.max_height, pil_image.height) # Return the touched variables return image_list, self.max_width, self.max_height, latents_plot # Process Functions def validate_xy_plot(self): if self.x_type == 'None' and self.y_type == 'None': log_node_warn(f'easyKsampler[{self.my_unique_id}]','No Valid Plot Types - Reverting to default sampling...') return False else: return True def get_latent(self, samples): # Extract the 'samples' tensor from the dictionary latent_image_tensor = samples["samples"] # Split the tensor into individual image tensors image_tensors = torch.split(latent_image_tensor, 1, dim=0) # Create a list of dictionaries containing the individual image tensors latent_list = [{'samples': image} for image in image_tensors] # Set latent only to the first latent of batch if self.latent_id >= len(latent_list): log_node_warn(f'easy kSampler[{self.my_unique_id}]',f'The selected latent_id ({self.latent_id}) is out of range.') log_node_warn(f'easy kSampler[{self.my_unique_id}]', f'Automatically setting the latent_id to the last image in the list (index: {len(latent_list) - 1}).') self.latent_id = len(latent_list) - 1 return latent_list[self.latent_id] def get_labels_and_sample(self, plot_image_vars, latent_image, preview_latent, start_step, last_step, force_full_denoise, disable_noise): for x_index, x_value in enumerate(self.x_values): plot_image_vars, x_value_label = self.define_variable(plot_image_vars, self.x_type, x_value, x_index) self.x_label = self.update_label(self.x_label, x_value_label, len(self.x_values)) if self.y_type != 'None': for y_index, y_value in enumerate(self.y_values): plot_image_vars, y_value_label = self.define_variable(plot_image_vars, self.y_type, y_value, y_index) self.y_label = self.update_label(self.y_label, y_value_label, len(self.y_values)) # ttNl(f'{CC.GREY}X: {x_value_label}, Y: {y_value_label}').t( # f'Plot Values {self.num}/{self.total} ->').p() self.image_list, self.max_width, self.max_height, self.latents_plot = self.sample_plot_image( plot_image_vars, latent_image, preview_latent, self.latents_plot, self.image_list, disable_noise, start_step, last_step, force_full_denoise, x_value, y_value) self.num += 1 else: # ttNl(f'{CC.GREY}X: {x_value_label}').t(f'Plot Values {self.num}/{self.total} ->').p() self.image_list, self.max_width, self.max_height, self.latents_plot = self.sample_plot_image( plot_image_vars, latent_image, preview_latent, 
self.latents_plot, self.image_list, disable_noise, start_step, last_step, force_full_denoise, x_value) self.num += 1 # Rearrange latent array to match preview image grid self.latents_plot = self.rearrange_tensors(self.latents_plot, self.num_cols, self.num_rows) # Concatenate the tensors along the first dimension (dim=0) self.latents_plot = torch.cat(self.latents_plot, dim=0) return self.latents_plot def plot_images_and_labels(self): # Calculate the background dimensions bg_width, bg_height, x_offset_initial, y_offset = self.calculate_background_dimensions() # Create the white background image background = Image.new('RGBA', (int(bg_width), int(bg_height)), color=(255, 255, 255, 255)) output_image = [] for row_index in range(self.num_rows): x_offset = x_offset_initial for col_index in range(self.num_cols): index = col_index * self.num_rows + row_index img = self.image_list[index] output_image.append(sampler.pil2tensor(img)) background.paste(img, (x_offset, y_offset)) # Handle X label if row_index == 0 and self.x_type != "None": label_bg = self.create_label(img, self.x_label[col_index], int(48 * img.width / 512)) label_y = (y_offset - label_bg.height) // 2 background.alpha_composite(label_bg, (x_offset, label_y)) # Handle Y label if col_index == 0 and self.y_type != "None": label_bg = self.create_label(img, self.y_label[row_index], int(48 * img.height / 512), False) label_bg = label_bg.rotate(90, expand=True) label_x = (x_offset - label_bg.width) // 2 label_y = y_offset + (img.height - label_bg.height) // 2 background.alpha_composite(label_bg, (label_x, label_y)) x_offset += img.width + self.grid_spacing y_offset += img.height + self.grid_spacing return (sampler.pil2tensor(background), output_image) easyCache = easyLoader() sampler = easySampler() def check_link_to_clip(node_id, clip_id, visited=None, node=None): """Check if a given node links directly or indirectly to a loader node.""" if visited is None: visited = set() if node_id in visited: return False visited.add(node_id) if "pipe" in node["inputs"]: link_ids = node["inputs"]["pipe"] for id in link_ids: if id != 0 and id == str(clip_id): return True return False def find_nearest_steps(clip_id, prompt): """Find the nearest KSampler or preSampling node that references the given id.""" for id in prompt: node = prompt[id] if "Sampler" in node["class_type"] or "sampler" in node["class_type"] or "Sampling" in node["class_type"]: # Check if this KSampler node directly or indirectly references the given CLIPTextEncode node if check_link_to_clip(id, clip_id, None, node): steps = node["inputs"]["steps"] if "steps" in node["inputs"] else 1 return steps return 1 def find_wildcards_seed(text, prompt): if "__" in text: for i in prompt: if "wildcards" in prompt[i]['class_type'] and text == prompt[i]['inputs']['text']: return prompt[i]['inputs']['seed_num'] if "seed_num" in prompt[i]['inputs'] else None else: return None class easySave: def __init__(self, my_unique_id=0, prompt=None, extra_pnginfo=None, number_padding=5, overwrite_existing=False, output_dir=folder_paths.get_temp_directory()): self.number_padding = int(number_padding) if number_padding not in [None, "None", 0] else None self.overwrite_existing = overwrite_existing self.my_unique_id = my_unique_id self.prompt = prompt self.extra_pnginfo = extra_pnginfo self.type = 'temp' self.output_dir = output_dir if self.output_dir != folder_paths.get_temp_directory(): self.output_dir = self.folder_parser(self.output_dir, self.prompt, self.my_unique_id) if not os.path.exists(self.output_dir): 
self._create_directory(self.output_dir) @staticmethod def _create_directory(folder: str): """Try to create the directory and log the status.""" log_node_warn("", f"Folder {folder} does not exist. Attempting to create...") if not os.path.exists(folder): try: os.makedirs(folder) log_node_success("",f"{folder} Created Successfully") except OSError: log_node_error(f"Failed to create folder {folder}") pass @staticmethod def _map_filename(filename: str, filename_prefix: str) -> Tuple[int, str, Optional[int]]: """Utility function to map filename to its parts.""" # Get the prefix length and extract the prefix prefix_len = len(os.path.basename(filename_prefix)) prefix = filename[:prefix_len] # Search for the primary digits digits = re.search(r'(\d+)', filename[prefix_len:]) # Search for the number in brackets after the primary digits group_id = re.search(r'\((\d+)\)', filename[prefix_len:]) return (int(digits.group()) if digits else 0, prefix, int(group_id.group(1)) if group_id else 0) @staticmethod def _format_date(text: str, date: datetime.datetime) -> str: """Format the date according to specific patterns.""" date_formats = { 'd': lambda d: d.day, 'dd': lambda d: '{:02d}'.format(d.day), 'M': lambda d: d.month, 'MM': lambda d: '{:02d}'.format(d.month), 'h': lambda d: d.hour, 'hh': lambda d: '{:02d}'.format(d.hour), 'm': lambda d: d.minute, 'mm': lambda d: '{:02d}'.format(d.minute), 's': lambda d: d.second, 'ss': lambda d: '{:02d}'.format(d.second), 'y': lambda d: d.year, 'yy': lambda d: str(d.year)[2:], 'yyy': lambda d: str(d.year)[1:], 'yyyy': lambda d: d.year, } # We need to sort the keys in reverse order to ensure we match the longest formats first for format_str in sorted(date_formats.keys(), key=len, reverse=True): if format_str in text: text = text.replace(format_str, str(date_formats[format_str](date))) return text @staticmethod def _gather_all_inputs(prompt: Dict[str, dict], unique_id: str, linkInput: str = '', collected_inputs: Optional[Dict[str, Union[str, List[str]]]] = None) -> Dict[ str, Union[str, List[str]]]: """Recursively gather all inputs from the prompt dictionary.""" if prompt == None: return None collected_inputs = collected_inputs or {} prompt_inputs = prompt[str(unique_id)]["inputs"] for p_input, p_input_value in prompt_inputs.items(): a_input = f"{linkInput}>{p_input}" if linkInput else p_input if isinstance(p_input_value, list): easySave._gather_all_inputs(prompt, p_input_value[0], a_input, collected_inputs) else: existing_value = collected_inputs.get(a_input) if existing_value is None: collected_inputs[a_input] = p_input_value elif p_input_value not in existing_value: collected_inputs[a_input] = existing_value + "; " + p_input_value # if "text" in collected_inputs: # del collected_inputs['text'] # print(collected_inputs) return collected_inputs @staticmethod def _get_filename_with_padding(output_dir, filename, number_padding, group_id, ext): """Return filename with proper padding.""" try: filtered = list(filter(lambda a: a[1] == filename, map(lambda x: easySave._map_filename(x, filename), os.listdir(output_dir)))) last = max(filtered)[0] for f in filtered: if f[0] == last: if f[2] == 0 or f[2] == group_id: last += 1 counter = last except (ValueError, FileNotFoundError): os.makedirs(output_dir, exist_ok=True) counter = 1 if group_id == 0: return f"{filename}.{ext}" if number_padding is None else f"{filename}_{counter:0{number_padding}}.{ext}" else: return f"{filename}_({group_id}).{ext}" if number_padding is None else 
f"{filename}_{counter:0{number_padding}}_({group_id}).{ext}" @staticmethod def filename_parser(output_dir: str, filename_prefix: str, prompt: Dict[str, dict], my_unique_id: str, number_padding: int, group_id: int, ext: str) -> str: """Parse the filename using provided patterns and replace them with actual values.""" subfolder = os.path.dirname(os.path.normpath(filename_prefix)) filename = os.path.basename(os.path.normpath(filename_prefix)) filename = re.sub(r'%date:(.*?)%', lambda m: easySave._format_date(m.group(1), datetime.datetime.now()), filename_prefix) all_inputs = easySave._gather_all_inputs(prompt, my_unique_id) filename = re.sub(r'%(.*?)%', lambda m: str(all_inputs.get(m.group(1), '')), filename) filename = re.sub(r'[/\\]+', '-', filename) filename = easySave._get_filename_with_padding(output_dir, filename, number_padding, group_id, ext) return filename, subfolder @staticmethod def folder_parser(output_dir: str, prompt: Dict[str, dict], my_unique_id: str): output_dir = re.sub(r'%date:(.*?)%', lambda m: easySave._format_date(m.group(1), datetime.datetime.now()), output_dir) all_inputs = easySave._gather_all_inputs(prompt, my_unique_id) return re.sub(r'%(.*?)%', lambda m: str(all_inputs.get(m.group(1), '')), output_dir) def images(self, images, filename_prefix, output_type, embed_workflow=True, ext="png", group_id=0): FORMAT_MAP = { "png": "PNG", "jpg": "JPEG", "jpeg": "JPEG", "bmp": "BMP", "tif": "TIFF", "tiff": "TIFF" } if ext not in FORMAT_MAP: raise ValueError(f"Unsupported file extension {ext}") if output_type == "Hide": return list() if output_type in ("Save", "Hide/Save", "Sender/Save"): output_dir = self.output_dir if self.output_dir != folder_paths.get_temp_directory() else folder_paths.get_output_directory() self.type = "output" if output_type in ("Preview", "Sender"): output_dir = self.output_dir filename_prefix = 'easyPreview' results = list() for image in images: img = Image.fromarray(np.clip(255. * image.cpu().numpy(), 0, 255).astype(np.uint8)) filename = filename_prefix.replace("%width%", str(img.size[0])).replace("%height%", str(img.size[1])) filename, subfolder = easySave.filename_parser(output_dir, filename, self.prompt, self.my_unique_id, self.number_padding, group_id, ext) file_path = os.path.join(output_dir, filename) if ext == "png" and embed_workflow in (True, "True"): metadata = PngInfo() if self.prompt is not None: metadata.add_text("prompt", json.dumps(self.prompt)) if hasattr(self, 'extra_pnginfo') and self.extra_pnginfo is not None: for key, value in self.extra_pnginfo.items(): metadata.add_text(key, json.dumps(value)) if self.overwrite_existing or not os.path.isfile(file_path): img.save(file_path, pnginfo=metadata, format=FORMAT_MAP[ext]) else: if self.overwrite_existing or not os.path.isfile(file_path): img.save(file_path, format=FORMAT_MAP[ext]) else: log_node_error("",f"File {file_path} already exists... 
Skipping") results.append({ "filename": filename, "subfolder": subfolder, "type": self.type }) return results def textfile(self, text, filename_prefix, output_type, group_id=0, ext='txt'): if output_type == "Hide": return [] if output_type in ("Save", "Hide/Save"): output_dir = self.output_dir if self.output_dir != folder_paths.get_temp_directory() else folder_paths.get_output_directory() if output_type == "Preview": filename_prefix = 'easyPreview' filename = easySave.filename_parser(output_dir, filename_prefix, self.prompt, self.my_unique_id, self.number_padding, group_id, ext) file_path = os.path.join(output_dir, filename) if self.overwrite_existing or not os.path.isfile(file_path): with open(file_path, 'w') as f: f.write(text) else: log_node_error("", f"File {file_path} already exists... Skipping") # ---------------------------------------------------------------提示词 开始----------------------------------------------------------------------# # 正面提示词 class positivePrompt: def __init__(self): pass @classmethod def INPUT_TYPES(s): return {"required": { "positive": ("STRING", {"default": "", "multiline": True, "placeholder": "Positive"}),} } RETURN_TYPES = ("STRING",) RETURN_NAMES = ("positive",) FUNCTION = "main" CATEGORY = "EasyUse/Prompt" @staticmethod def main(positive): return positive, # 通配符提示词 class wildcardsPrompt: def __init__(self): pass @classmethod def INPUT_TYPES(s): wildcard_list = get_wildcard_list() return {"required": { "text": ("STRING", {"default": "", "multiline": True, "dynamicPrompts": False, "placeholder": "(Support Lora Block Weight and wildcard)"}), "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),), "Select to add Wildcard": (["Select the Wildcard to add to the text"] + wildcard_list,), "seed_num": ("INT", {"default": 0, "min": 0, "max": 1125899906842624}), }, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}, } RETURN_TYPES = ("STRING",) RETURN_NAMES = ("text",) OUTPUT_NODE = True FUNCTION = "main" CATEGORY = "EasyUse/Prompt" @staticmethod def main(*args, **kwargs): my_unique_id = kwargs["my_unique_id"] extra_pnginfo = kwargs["extra_pnginfo"] prompt = kwargs["prompt"] seed_num = kwargs["seed_num"] # Clean loaded_objects easyCache.update_loaded_objects(prompt) my_unique_id = int(my_unique_id) easy_save = easySave(my_unique_id, prompt, extra_pnginfo) # if my_unique_id: # workflow = extra_pnginfo["workflow"] # node = next((x for x in workflow["nodes"] if str(x["id"]) == my_unique_id), None) # if node: # seed_num = prompt[my_unique_id]['inputs']['seed_num'] if 'seed_num' in prompt[my_unique_id][ # 'inputs'] else 0 # length = len(node["widgets_values"]) # node["widgets_values"][length - 2] = seed_num text = kwargs['text'] return {"ui": {"value": [seed_num]}, "result": (text,)} # 负面提示词 class negativePrompt: def __init__(self): pass @classmethod def INPUT_TYPES(s): return {"required": { "negative": ("STRING", {"default": "", "multiline": True, "placeholder": "Negative"}),} } RETURN_TYPES = ("STRING",) RETURN_NAMES = ("negative",) FUNCTION = "main" CATEGORY = "EasyUse/Prompt" @staticmethod def main(negative): return negative, # 肖像大师 # Created by AI Wiz Art (Stefano Flore) # Version: 2.2 # https://stefanoflore.it # https://ai-wiz.art class portraitMaster: @classmethod def INPUT_TYPES(s): max_float_value = 1.95 prompt_path = Path(os.path.join(Path(__file__).parent.parent, 'resources/portrait_prompt.json')) if not os.path.exists(prompt_path): response = 
urlopen('https://raw.githubusercontent.com/yolain/ComfyUI-Easy-Use/main/resources/portrait_prompt.json') temp_prompt = json.loads(response.read()) prompt_serialized = json.dumps(temp_prompt, indent=4) with open(prompt_path, "w") as f: f.write(prompt_serialized) del response, temp_prompt # Load local with open(prompt_path, 'r') as f: list = json.load(f) keys = [ ['shot', 'COMBO', {"key": "shot_list"}], ['shot_weight', 'FLOAT'], ['gender', 'COMBO', {"default": "Woman", "key": "gender_list"}], ['age', 'INT', {"default": 30, "min": 18, "max": 90, "step": 1, "display": "slider"}], ['nationality_1', 'COMBO', {"default": "Chinese", "key": "nationality_list"}], ['nationality_2', 'COMBO', {"key": "nationality_list"}], ['nationality_mix', 'FLOAT'], ['body_type', 'COMBO', {"key": "body_type_list"}], ['body_type_weight', 'FLOAT'], ['model_pose', 'COMBO', {"key": "model_pose_list"}], ['eyes_color', 'COMBO', {"key": "eyes_color_list"}], ['facial_expression', 'COMBO', {"key": "face_expression_list"}], ['facial_expression_weight', 'FLOAT'], ['face_shape', 'COMBO', {"key": "face_shape_list"}], ['face_shape_weight', 'FLOAT'], ['facial_asymmetry', 'FLOAT'], ['hair_style', 'COMBO', {"key": "hair_style_list"}], ['hair_color', 'COMBO', {"key": "hair_color_list"}], ['disheveled', 'FLOAT'], ['beard', 'COMBO', {"key": "beard_list"}], ['skin_details', 'FLOAT'], ['skin_pores', 'FLOAT'], ['dimples', 'FLOAT'], ['freckles', 'FLOAT'], ['moles', 'FLOAT'], ['skin_imperfections', 'FLOAT'], ['skin_acne', 'FLOAT'], ['tanned_skin', 'FLOAT'], ['eyes_details', 'FLOAT'], ['iris_details', 'FLOAT'], ['circular_iris', 'FLOAT'], ['circular_pupil', 'FLOAT'], ['light_type', 'COMBO', {"key": "light_type_list"}], ['light_direction', 'COMBO', {"key": "light_direction_list"}], ['light_weight', 'FLOAT'] ] widgets = {} for i, obj in enumerate(keys): if obj[1] == 'COMBO': key = obj[2]['key'] if obj[2] and 'key' in obj[2] else obj[0] _list = list[key].copy() _list.insert(0, '-') widgets[obj[0]] = (_list, {**obj[2]}) elif obj[1] == 'FLOAT': widgets[obj[0]] = ("FLOAT", {"default": 0, "step": 0.05, "min": 0, "max": max_float_value, "display": "slider",}) elif obj[1] == 'INT': widgets[obj[0]] = (obj[1], obj[2]) del list return { "required": { **widgets, "photorealism_improvement": (["enable", "disable"],), "prompt_start": ("STRING", {"multiline": True, "default": "raw photo, (realistic:1.5)"}), "prompt_additional": ("STRING", {"multiline": True, "default": ""}), "prompt_end": ("STRING", {"multiline": True, "default": ""}), "negative_prompt": ("STRING", {"multiline": True, "default": ""}), } } RETURN_TYPES = ("STRING", "STRING",) RETURN_NAMES = ("positive", "negative",) FUNCTION = "pm" CATEGORY = "EasyUse/Prompt" def pm(self, shot="-", shot_weight=1, gender="-", body_type="-", body_type_weight=0, eyes_color="-", facial_expression="-", facial_expression_weight=0, face_shape="-", face_shape_weight=0, nationality_1="-", nationality_2="-", nationality_mix=0.5, age=30, hair_style="-", hair_color="-", disheveled=0, dimples=0, freckles=0, skin_pores=0, skin_details=0, moles=0, skin_imperfections=0, wrinkles=0, tanned_skin=0, eyes_details=1, iris_details=1, circular_iris=1, circular_pupil=1, facial_asymmetry=0, prompt_additional="", prompt_start="", prompt_end="", light_type="-", light_direction="-", light_weight=0, negative_prompt="", photorealism_improvement="disable", beard="-", model_pose="-", skin_acne=0): prompt = [] if gender == "-": gender = "" else: if age <= 25 and gender == 'Woman': gender = 'girl' if age <= 25 and gender == 'Man': gender = 
'boy' gender = " " + gender + " " if nationality_1 != '-' and nationality_2 != '-': nationality = f"[{nationality_1}:{nationality_2}:{round(nationality_mix, 2)}]" elif nationality_1 != '-': nationality = nationality_1 + " " elif nationality_2 != '-': nationality = nationality_2 + " " else: nationality = "" if prompt_start != "": prompt.append(f"{prompt_start}") if shot != "-" and shot_weight > 0: prompt.append(f"({shot}:{round(shot_weight, 2)})") prompt.append(f"({nationality}{gender}{round(age)}-years-old:1.5)") if body_type != "-" and body_type_weight > 0: prompt.append(f"({body_type}, {body_type} body:{round(body_type_weight, 2)})") if model_pose != "-": prompt.append(f"({model_pose}:1.5)") if eyes_color != "-": prompt.append(f"({eyes_color} eyes:1.25)") if facial_expression != "-" and facial_expression_weight > 0: prompt.append( f"({facial_expression}, {facial_expression} expression:{round(facial_expression_weight, 2)})") if face_shape != "-" and face_shape_weight > 0: prompt.append(f"({face_shape} shape face:{round(face_shape_weight, 2)})") if hair_style != "-": prompt.append(f"({hair_style} hairstyle:1.25)") if hair_color != "-": prompt.append(f"({hair_color} hair:1.25)") if beard != "-": prompt.append(f"({beard}:1.15)") if disheveled != "-" and disheveled > 0: prompt.append(f"(disheveled:{round(disheveled, 2)})") if prompt_additional != "": prompt.append(f"{prompt_additional}") if skin_details > 0: prompt.append(f"(skin details, skin texture:{round(skin_details, 2)})") if skin_pores > 0: prompt.append(f"(skin pores:{round(skin_pores, 2)})") if skin_imperfections > 0: prompt.append(f"(skin imperfections:{round(skin_imperfections, 2)})") if skin_acne > 0: prompt.append(f"(acne, skin with acne:{round(skin_acne, 2)})") if wrinkles > 0: prompt.append(f"(skin imperfections:{round(wrinkles, 2)})") if tanned_skin > 0: prompt.append(f"(tanned skin:{round(tanned_skin, 2)})") if dimples > 0: prompt.append(f"(dimples:{round(dimples, 2)})") if freckles > 0: prompt.append(f"(freckles:{round(freckles, 2)})") if moles > 0: prompt.append(f"(skin pores:{round(moles, 2)})") if eyes_details > 0: prompt.append(f"(eyes details:{round(eyes_details, 2)})") if iris_details > 0: prompt.append(f"(iris details:{round(iris_details, 2)})") if circular_iris > 0: prompt.append(f"(circular iris:{round(circular_iris, 2)})") if circular_pupil > 0: prompt.append(f"(circular pupil:{round(circular_pupil, 2)})") if facial_asymmetry > 0: prompt.append(f"(facial asymmetry, face asymmetry:{round(facial_asymmetry, 2)})") if light_type != '-' and light_weight > 0: if light_direction != '-': prompt.append(f"({light_type} {light_direction}:{round(light_weight, 2)})") else: prompt.append(f"({light_type}:{round(light_weight, 2)})") if prompt_end != "": prompt.append(f"{prompt_end}") prompt = ", ".join(prompt) prompt = prompt.lower() if photorealism_improvement == "enable": prompt = prompt + ", (professional photo, balanced photo, balanced exposure:1.2), (film grain:1.15)" if photorealism_improvement == "enable": negative_prompt = negative_prompt + ", (shinny skin, reflections on the skin, skin reflections:1.25)" log_node_info("Portrait Master as generate the prompt:", prompt) return (prompt, negative_prompt,) # 潜空间sigma相乘 class latentMultiplyBySigma: @classmethod def INPUT_TYPES(s): return {"required": { "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), "scheduler": (comfy.samplers.KSampler.SCHEDULERS,), "steps": ("INT", {"default": 10000, "min": 0, "max": 10000}), "start_at_step": ("INT", {"default": 0, "min": 0, "max": 
10000}), "end_at_step": ("INT", {"default": 10000, "min": 1, "max": 10000}), }, "optional": { "pipe": ("PIPE_LINE",), "optional_model": ("MODEL",), "optional_latent": ("LATENT",) }} RETURN_TYPES = ("PIPE_LINE", "LATENT", "FLOAT",) RETURN_NAMES = ("pipe", "latent", "sigma",) FUNCTION = "run" CATEGORY = "EasyUse/Latent" def run(self, sampler_name, scheduler, steps, start_at_step, end_at_step, pipe=None, optional_model=None, optional_latent=None): model = optional_model if optional_model is not None else pipe["model"] samples = optional_latent if optional_latent is not None else pipe["samples"] device = comfy.model_management.get_torch_device() end_at_step = min(steps, end_at_step) start_at_step = min(start_at_step, end_at_step) real_model = None comfy.model_management.load_model_gpu(model) real_model = model.model sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=1.0, model_options=model.model_options) sigmas = sampler.sigmas sigma = sigmas[start_at_step] - sigmas[end_at_step] sigma /= model.model.latent_format.scale_factor sigma = sigma.cpu().numpy() samples_out = samples.copy() s1 = samples["samples"] samples_out["samples"] = s1 * sigma if pipe is None: pipe = {} new_pipe = { **pipe, "samples": samples_out } del pipe return (new_pipe, samples_out, sigma) # Latent遮罩复合 class latentCompositeMaskedWithCond: @classmethod def INPUT_TYPES(s): return { "required": { "pipe": ("PIPE_LINE",), "text_combine": ("STRING", {"default": ""}), "source_latent": ("LATENT",), "source_mask": ("MASK",), "new_mask": ("MASK",), }, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}, } RETURN_TYPES = ("PIPE_LINE", "LATENT", "CONDITIONING") RETURN_NAMES = ("pipe", "latent", "conditioning",) FUNCTION = "run" OUTPUT_MODE = True CATEGORY = "EasyUse/Latent" def run(self, pipe, text_combine, source_latent, source_mask, new_mask, prompt=None, extra_pnginfo=None, my_unique_id=None): clip = pipe["clip"] destination_latent = pipe["samples"] positive = pipe["loader_settings"]["positive"] + ',' + text_combine positive_token_normalization = pipe["loader_settings"]["positive_token_normalization"] positive_weight_interpretation = pipe["loader_settings"]["positive_weight_interpretation"] a1111_prompt_style = pipe["loader_settings"]["a1111_prompt_style"] positive_cond = pipe["positive"] log_node_warn("正在处理提示词编码...") # Use new clip text encode by smzNodes like same as webui, when if you installed the smzNodes if a1111_prompt_style: if "smZ CLIPTextEncode" in ALL_NODE_CLASS_MAPPINGS: cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode'] steps = pipe["steps"] positive_embeddings_final, = cls().encode(clip, positive, "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) else: raise Exception(f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'") else: positive_embeddings_final, positive_pooled = advanced_encode(clip, positive, positive_token_normalization, positive_weight_interpretation, w_max=1.0, apply_to_pooled='enable') positive_embeddings_final = [[positive_embeddings_final, {"pooled_output": positive_pooled}]] # source cond (cond_1,) = ConditioningSetMask().append(positive_cond, source_mask, "default", 1) (cond_2,) = ConditioningSetMask().append(positive_embeddings_final, new_mask, "default", 1) positive_cond = cond_1 + cond_2 # latent composite masked (samples,) = LatentCompositeMasked().composite(destination_latent, source_latent, 0, 0, False) new_pipe = { 
**pipe, "positive": positive_cond, "samples": samples, "loader_settings": { **pipe["loader_settings"], "positive": positive, } } del pipe return (new_pipe, samples, positive_cond) # 随机种 class easySeed: @classmethod def INPUT_TYPES(s): return { "required": { "seed_num": ("INT", {"default": 0, "min": 0, "max": 1125899906842624}), }, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}, } RETURN_TYPES = ("INT",) RETURN_NAMES = ("seed_num",) FUNCTION = "doit" CATEGORY = "EasyUse/Seed" OUTPUT_NODE = True def doit(self, seed_num=0, prompt=None, extra_pnginfo=None, my_unique_id=None): return seed_num, # 全局随机种 class globalSeed: @classmethod def INPUT_TYPES(s): return { "required": { "value": ("INT", {"default": 0, "min": 0, "max": 1125899906842624}), "mode": ("BOOLEAN", {"default": True, "label_on": "control_before_generate", "label_off": "control_after_generate"}), "action": (["fixed", "increment", "decrement", "randomize", "increment for each node", "decrement for each node", "randomize for each node"], ), "last_seed": ("STRING", {"default": ""}), } } RETURN_TYPES = () FUNCTION = "doit" CATEGORY = "EasyUse/Seed" OUTPUT_NODE = True def doit(self, **kwargs): return {} #---------------------------------------------------------------提示词 结束------------------------------------------------------------------------# #---------------------------------------------------------------加载器 开始----------------------------------------------------------------------# # 简易加载器完整 class fullLoader: @classmethod def INPUT_TYPES(cls): resolution_strings = [f"{width} x {height}" for width, height in BASE_RESOLUTIONS] a1111_prompt_style_default = False return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"),), "config_name": (["Default", ] + folder_paths.get_filename_list("configs"), {"default": "Default"}), "vae_name": (["Baked VAE"] + folder_paths.get_filename_list("vae"),), "clip_skip": ("INT", {"default": -1, "min": -24, "max": 0, "step": 1}), "lora_name": (["None"] + folder_paths.get_filename_list("loras"),), "lora_model_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), "lora_clip_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), "resolution": (resolution_strings,), "empty_latent_width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), "empty_latent_height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), "positive": ("STRING", {"default": "Positive", "multiline": True}), "positive_token_normalization": (["none", "mean", "length", "length+mean"],), "positive_weight_interpretation": (["comfy", "A1111", "comfy++", "compel", "fixed attention"],), "negative": ("STRING", {"default": "Negative", "multiline": True}), "negative_token_normalization": (["none", "mean", "length", "length+mean"],), "negative_weight_interpretation": (["comfy", "A1111", "comfy++", "compel", "fixed attention"],), "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}), }, "optional": {"model_override": ("MODEL",), "clip_override": ("CLIP",), "vae_override": ("VAE",), "optional_lora_stack": ("LORA_STACK",), "a1111_prompt_style": ("BOOLEAN", {"default": a1111_prompt_style_default}),}, "hidden": {"prompt": "PROMPT", "my_unique_id": "UNIQUE_ID"} } RETURN_TYPES = ("PIPE_LINE", "MODEL", "VAE", "CLIP") RETURN_NAMES = ("pipe", "model", "vae", "clip") FUNCTION = "adv_pipeloader" CATEGORY = "EasyUse/Loaders" def adv_pipeloader(self, ckpt_name, config_name, vae_name, clip_skip, 
lora_name, lora_model_strength, lora_clip_strength, resolution, empty_latent_width, empty_latent_height, positive, positive_token_normalization, positive_weight_interpretation, negative, negative_token_normalization, negative_weight_interpretation, batch_size, model_override=None, clip_override=None, vae_override=None, optional_lora_stack=None, a1111_prompt_style=False, prompt=None, my_unique_id=None ): model: ModelPatcher | None = None clip: CLIP | None = None vae: VAE | None = None can_load_lora = True pipe_lora_stack = [] # resolution if resolution != "自定义 x 自定义": try: width, height = map(int, resolution.split(' x ')) empty_latent_width = width empty_latent_height = height except ValueError: raise ValueError("Invalid base_resolution format.") # Create Empty Latent latent = torch.zeros([batch_size, 4, empty_latent_height // 8, empty_latent_width // 8]).cpu() samples = {"samples": latent} # Clean models from loaded_objects easyCache.update_loaded_objects(prompt) log_node_warn("正在处理模型...") # 判断是否存在 模型叠加xyplot, 若存在优先缓存第一个模型 xyinputs_id = next((x for x in prompt if str(prompt[x]["class_type"]) == "easy XYInputs: ModelMergeBlocks"), None) if xyinputs_id is not None: node = prompt[xyinputs_id] if "ckpt_name_1" in node["inputs"]: ckpt_name_1 = node["inputs"]["ckpt_name_1"] model, clip, vae = easyCache.load_checkpoint(ckpt_name_1) can_load_lora = False # Load models elif model_override is not None and clip_override is not None and vae_override is not None: model = model_override clip = clip_override vae = vae_override elif model_override is not None: raise Exception(f"[ERROR] clip or vae is missing") elif vae_override is not None: raise Exception(f"[ERROR] model or clip is missing") elif clip_override is not None: raise Exception(f"[ERROR] model or vae is missing") else: model, clip, vae = easyCache.load_checkpoint(ckpt_name, config_name) if optional_lora_stack is not None: for lora in optional_lora_stack: if can_load_lora: model, clip = easyCache.load_lora(lora[0], model, clip, lora[1], lora[2]) pipe_lora_stack.append({"lora_name": lora[0], "model": model, "clip": clip, "lora_model_strength": lora[1], "lora_clip_strength": lora[2]}) if lora_name != "None": if can_load_lora: model, clip = easyCache.load_lora(lora_name, model, clip, lora_model_strength, lora_clip_strength) pipe_lora_stack.append({"lora_name": lora_name, "model": model, "clip": clip, "lora_model_strength": lora_model_strength, "lora_clip_strength": lora_clip_strength}) # Check for custom VAE if vae_name not in ["Baked VAE", "Baked-VAE"]: vae = easyCache.load_vae(vae_name) # CLIP skip if not clip: raise Exception("No CLIP found") log_node_warn("正在处理提示词...") positive_seed = find_wildcards_seed(positive, prompt) model, clip, positive, positive_decode, show_positive_prompt, pipe_lora_stack = process_with_loras(positive, model, clip, "Positive", positive_seed, can_load_lora, pipe_lora_stack) positive_wildcard_prompt = positive_decode if show_positive_prompt else "" negative_seed = find_wildcards_seed(negative, prompt) model, clip, negative, negative_decode, show_negative_prompt, pipe_lora_stack = process_with_loras(negative, model, clip, "Negative", negative_seed, can_load_lora, pipe_lora_stack) negative_wildcard_prompt = negative_decode if show_negative_prompt else "" clipped = clip.clone() if clip_skip != 0 and can_load_lora: clipped.clip_layer(clip_skip) log_node_warn("正在处理提示词编码...") # Use new clip text encode by smzNodes like same as webui, when if you installed the smzNodes if a1111_prompt_style: if "smZ CLIPTextEncode" in 
ALL_NODE_CLASS_MAPPINGS: cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode'] steps = find_nearest_steps(my_unique_id, prompt) positive_embeddings_final, = cls().encode(clipped, positive, "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) negative_embeddings_final, = cls().encode(clipped, negative, "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) else: raise Exception(f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'") else: positive_embeddings_final, positive_pooled = advanced_encode(clipped, positive, positive_token_normalization, positive_weight_interpretation, w_max=1.0, apply_to_pooled='enable') positive_embeddings_final = [[positive_embeddings_final, {"pooled_output": positive_pooled}]] negative_embeddings_final, negative_pooled = advanced_encode(clipped, negative, negative_token_normalization, negative_weight_interpretation, w_max=1.0, apply_to_pooled='enable') negative_embeddings_final = [[negative_embeddings_final, {"pooled_output": negative_pooled}]] image = easySampler.pil2tensor(Image.new('RGB', (1, 1), (0, 0, 0))) log_node_warn("处理结束...") pipe = {"model": model, "positive": positive_embeddings_final, "negative": negative_embeddings_final, "vae": vae, "clip": clip, "samples": samples, "images": image, "seed": 0, "loader_settings": {"ckpt_name": ckpt_name, "vae_name": vae_name, "lora_name": lora_name, "lora_model_strength": lora_model_strength, "lora_clip_strength": lora_clip_strength, "lora_stack": pipe_lora_stack, "refiner_ckpt_name": None, "refiner_vae_name": None, "refiner_lora_name": None, "refiner_lora_model_strength": None, "refiner_lora_clip_strength": None, "clip_skip": clip_skip, "a1111_prompt_style": a1111_prompt_style, "positive": positive, "positive_l": None, "positive_g": None, "positive_token_normalization": positive_token_normalization, "positive_weight_interpretation": positive_weight_interpretation, "positive_balance": None, "negative": negative, "negative_l": None, "negative_g": None, "negative_token_normalization": negative_token_normalization, "negative_weight_interpretation": negative_weight_interpretation, "negative_balance": None, "empty_latent_width": empty_latent_width, "empty_latent_height": empty_latent_height, "batch_size": batch_size, "seed": 0, "empty_samples": samples, } } return {"ui": {"positive": positive_wildcard_prompt, "negative": negative_wildcard_prompt}, "result": (pipe, model, vae, clip)} # A1111简易加载器 class a1111Loader: @classmethod def INPUT_TYPES(cls): resolution_strings = [f"{width} x {height}" for width, height in BASE_RESOLUTIONS] a1111_prompt_style_default = False checkpoints = folder_paths.get_filename_list("checkpoints") loras = ["None"] + folder_paths.get_filename_list("loras") return {"required": { "ckpt_name": (checkpoints,), "vae_name": (["Baked VAE"] + folder_paths.get_filename_list("vae"),), "clip_skip": ("INT", {"default": -1, "min": -24, "max": 0, "step": 1}), "lora_name": (loras,), "lora_model_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), "lora_clip_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), "resolution": (resolution_strings, {"default": "512 x 512"}), "empty_latent_width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), "empty_latent_height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), "positive": ("STRING", {"default": "Positive", "multiline": True}), "negative": ("STRING", {"default": "Negative", 
"multiline": True}), "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}), }, "optional": {"optional_lora_stack": ("LORA_STACK",), "a1111_prompt_style": ("BOOLEAN", {"default": a1111_prompt_style_default})}, "hidden": {"prompt": "PROMPT", "my_unique_id": "UNIQUE_ID"} } RETURN_TYPES = ("PIPE_LINE", "MODEL", "VAE") RETURN_NAMES = ("pipe", "model", "vae") FUNCTION = "adv_pipeloader" CATEGORY = "EasyUse/Loaders" def adv_pipeloader(self, ckpt_name, vae_name, clip_skip, lora_name, lora_model_strength, lora_clip_strength, resolution, empty_latent_width, empty_latent_height, positive, negative, batch_size, optional_lora_stack=None, a1111_prompt_style=False, prompt=None, my_unique_id=None): return fullLoader.adv_pipeloader(self, ckpt_name, 'Default', vae_name, clip_skip, lora_name, lora_model_strength, lora_clip_strength, resolution, empty_latent_width, empty_latent_height, positive, 'none', 'A1111', negative,'none','A1111', batch_size, None, None, None, optional_lora_stack, a1111_prompt_style, prompt, my_unique_id ) # Comfy简易加载器 class comfyLoader: @classmethod def INPUT_TYPES(cls): resolution_strings = [f"{width} x {height}" for width, height in BASE_RESOLUTIONS] return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"),), "vae_name": (["Baked VAE"] + folder_paths.get_filename_list("vae"),), "clip_skip": ("INT", {"default": -1, "min": -24, "max": 0, "step": 1}), "lora_name": (["None"] + folder_paths.get_filename_list("loras"),), "lora_model_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), "lora_clip_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), "resolution": (resolution_strings, {"default": "512 x 512"}), "empty_latent_width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), "empty_latent_height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), "positive": ("STRING", {"default": "Positive", "multiline": True}), "negative": ("STRING", {"default": "Negative", "multiline": True}), "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}), }, "optional": {"optional_lora_stack": ("LORA_STACK",)}, "hidden": {"prompt": "PROMPT", "positive_weight_interpretation": "comfy", "negative_weight_interpretation": "comfy"}, "my_unique_id": "UNIQUE_ID"} RETURN_TYPES = ("PIPE_LINE", "MODEL", "VAE") RETURN_NAMES = ("pipe", "model", "vae") FUNCTION = "adv_pipeloader" CATEGORY = "EasyUse/Loaders" def adv_pipeloader(self, ckpt_name, vae_name, clip_skip, lora_name, lora_model_strength, lora_clip_strength, resolution, empty_latent_width, empty_latent_height, positive, negative, batch_size, optional_lora_stack=None, prompt=None, my_unique_id=None): return fullLoader.adv_pipeloader(self, ckpt_name, 'Default', vae_name, clip_skip, lora_name, lora_model_strength, lora_clip_strength, resolution, empty_latent_width, empty_latent_height, positive, 'none', 'comfy', negative, 'none', 'comfy', batch_size, None, None, None, optional_lora_stack, False, prompt, my_unique_id ) # Zero123简易加载器 (3D) try: except FileNotFoundError: log_node_error("EasyUse[zero123Loader]", "请更新ComfyUI到最新版本") class zero123Loader: @classmethod def INPUT_TYPES(cls): def get_file_list(filenames): return [file for file in filenames if file != "put_models_here.txt" and "zero123" in file] return {"required": { "ckpt_name": (get_file_list(folder_paths.get_filename_list("checkpoints")),), "vae_name": (["Baked VAE"] + folder_paths.get_filename_list("vae"),), "init_image": ("IMAGE",), "empty_latent_width": ("INT", {"default": 
256, "min": 16, "max": MAX_RESOLUTION, "step": 8}), "empty_latent_height": ("INT", {"default": 256, "min": 16, "max": MAX_RESOLUTION, "step": 8}), "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}), "elevation": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0}), "azimuth": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0}), }, "hidden": {"prompt": "PROMPT"}, "my_unique_id": "UNIQUE_ID"} RETURN_TYPES = ("PIPE_LINE", "MODEL", "VAE") RETURN_NAMES = ("pipe", "model", "vae") FUNCTION = "adv_pipeloader" CATEGORY = "EasyUse/Loaders" def adv_pipeloader(self, ckpt_name, vae_name, init_image, empty_latent_width, empty_latent_height, batch_size, elevation, azimuth, prompt=None, my_unique_id=None): model: ModelPatcher | None = None vae: VAE | None = None clip: CLIP | None = None clip_vision = None # Clean models from loaded_objects easyCache.update_loaded_objects(prompt) model, clip_vision, vae = easyCache.load_checkpoint(ckpt_name, "Default", True) output = clip_vision.encode_image(init_image) pooled = output.image_embeds.unsqueeze(0) pixels = comfy.utils.common_upscale(init_image.movedim(-1, 1), empty_latent_width, empty_latent_height, "bilinear", "center").movedim(1, -1) encode_pixels = pixels[:, :, :, :3] t = vae.encode(encode_pixels) cam_embeds = camera_embeddings(elevation, azimuth) cond = torch.cat([pooled, cam_embeds.repeat((pooled.shape[0], 1, 1))], dim=-1) positive = [[cond, {"concat_latent_image": t}]] negative = [[torch.zeros_like(pooled), {"concat_latent_image": torch.zeros_like(t)}]] latent = torch.zeros([batch_size, 4, empty_latent_height // 8, empty_latent_width // 8]) samples = {"samples": latent} image = easySampler.pil2tensor(Image.new('RGB', (1, 1), (0, 0, 0))) pipe = {"model": model, "positive": positive, "negative": negative, "vae": vae, "clip": clip, "samples": samples, "images": image, "seed": 0, "loader_settings": {"ckpt_name": ckpt_name, "vae_name": vae_name, "positive": positive, "positive_l": None, "positive_g": None, "positive_balance": None, "negative": negative, "negative_l": None, "negative_g": None, "negative_balance": None, "empty_latent_width": empty_latent_width, "empty_latent_height": empty_latent_height, "batch_size": batch_size, "seed": 0, "empty_samples": samples, } } return (pipe, model, vae) #svd加载器 class svdLoader: @classmethod def INPUT_TYPES(cls): resolution_strings = [f"{width} x {height}" for width, height in BASE_RESOLUTIONS] def get_file_list(filenames): return [file for file in filenames if file != "put_models_here.txt" and "svd" in file] return {"required": { "ckpt_name": (get_file_list(folder_paths.get_filename_list("checkpoints")),), "vae_name": (["Baked VAE"] + folder_paths.get_filename_list("vae"),), "init_image": ("IMAGE",), "resolution": (resolution_strings, {"default": "1024 x 576"}), "empty_latent_width": ("INT", {"default": 256, "min": 16, "max": MAX_RESOLUTION, "step": 8}), "empty_latent_height": ("INT", {"default": 256, "min": 16, "max": MAX_RESOLUTION, "step": 8}), "video_frames": ("INT", {"default": 14, "min": 1, "max": 4096}), "motion_bucket_id": ("INT", {"default": 127, "min": 1, "max": 1023}), "fps": ("INT", {"default": 6, "min": 1, "max": 1024}), "augmentation_level": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.01}) }, "hidden": {"prompt": "PROMPT"}, "my_unique_id": "UNIQUE_ID"} RETURN_TYPES = ("PIPE_LINE", "MODEL", "VAE") RETURN_NAMES = ("pipe", "model", "vae") FUNCTION = "adv_pipeloader" CATEGORY = "EasyUse/Loaders" def adv_pipeloader(self, ckpt_name, vae_name, init_image, resolution, 
empty_latent_width, empty_latent_height, video_frames, motion_bucket_id, fps, augmentation_level, prompt=None, my_unique_id=None): model: ModelPatcher | None = None vae: VAE | None = None clip: CLIP | None = None clip_vision = None # resolution if resolution != "自定义 x 自定义": try: width, height = map(int, resolution.split(' x ')) empty_latent_width = width empty_latent_height = height except ValueError: raise ValueError("Invalid base_resolution format.") # Clean models from loaded_objects easyCache.update_loaded_objects(prompt) model, clip_vision, vae = easyCache.load_checkpoint(ckpt_name, "Default", True) output = clip_vision.encode_image(init_image) pooled = output.image_embeds.unsqueeze(0) pixels = comfy.utils.common_upscale(init_image.movedim(-1, 1), empty_latent_width, empty_latent_height, "bilinear", "center").movedim(1, -1) encode_pixels = pixels[:, :, :, :3] if augmentation_level > 0: encode_pixels += torch.randn_like(pixels) * augmentation_level t = vae.encode(encode_pixels) positive = [[pooled, {"motion_bucket_id": motion_bucket_id, "fps": fps, "augmentation_level": augmentation_level, "concat_latent_image": t}]] negative = [[torch.zeros_like(pooled), {"motion_bucket_id": motion_bucket_id, "fps": fps, "augmentation_level": augmentation_level, "concat_latent_image": torch.zeros_like(t)}]] latent = torch.zeros([video_frames, 4, empty_latent_height // 8, empty_latent_width // 8]) samples = {"samples": latent} image = easySampler.pil2tensor(Image.new('RGB', (1, 1), (0, 0, 0))) pipe = {"model": model, "positive": positive, "negative": negative, "vae": vae, "clip": clip, "samples": samples, "images": image, "seed": 0, "loader_settings": {"ckpt_name": ckpt_name, "vae_name": vae_name, "positive": positive, "positive_l": None, "positive_g": None, "positive_balance": None, "negative": negative, "negative_l": None, "negative_g": None, "negative_balance": None, "empty_latent_width": empty_latent_width, "empty_latent_height": empty_latent_height, "batch_size": 1, "seed": 0, "empty_samples": samples, } } return (pipe, model, vae) # lora class loraStackLoader: def __init__(self): pass @classmethod def INPUT_TYPES(s): max_lora_num = 10 inputs = { "required": { "toggle": ([True, False],), "mode": (["simple", "advanced"],), "num_loras": ("INT", {"default": 1, "min": 0, "max": max_lora_num}), }, "optional": { "optional_lora_stack": ("LORA_STACK",), }, } for i in range(1, max_lora_num+1): inputs["optional"][f"lora_{i}_name"] = ( ["None"] + folder_paths.get_filename_list("loras"), {"default": "None"}) inputs["optional"][f"lora_{i}_strength"] = ( "FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}) inputs["optional"][f"lora_{i}_model_strength"] = ( "FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}) inputs["optional"][f"lora_{i}_clip_strength"] = ( "FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}) return inputs RETURN_TYPES = ("LORA_STACK",) RETURN_NAMES = ("lora_stack",) FUNCTION = "stack" CATEGORY = "EasyUse/Loaders" def stack(self, toggle, mode, num_loras, lora_stack=None, **kwargs): if (toggle in [False, None, "False"]) or not kwargs: return None loras = [] # Import Stack values if lora_stack is not None: loras.extend([l for l in lora_stack if l[0] != "None"]) # Import Lora values for i in range(1, num_loras + 1): lora_name = kwargs.get(f"lora_{i}_name") if not lora_name or lora_name == "None": continue if mode == "simple": lora_strength = float(kwargs.get(f"lora_{i}_strength")) loras.append((lora_name, lora_strength, lora_strength)) elif mode == 
"advanced": model_strength = float(kwargs.get(f"lora_{i}_model_strength")) clip_strength = float(kwargs.get(f"lora_{i}_clip_strength")) loras.append((lora_name, model_strength, clip_strength)) return (loras,) # controlnet class controlnetSimple: @classmethod def INPUT_TYPES(s): def get_file_list(filenames): return [file for file in filenames if file != "put_models_here.txt" and "lllite" not in file] return { "required": { "pipe": ("PIPE_LINE",), "image": ("IMAGE",), "control_net_name": (get_file_list(folder_paths.get_filename_list("controlnet")),), }, "optional": { "control_net": ("CONTROL_NET",), "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}) } } RETURN_TYPES = ("PIPE_LINE",) RETURN_NAMES = ("pipe",) OUTPUT_NODE = True FUNCTION = "controlnetApply" CATEGORY = "EasyUse/Loaders" def controlnetApply(self, pipe, image, control_net_name, control_net=None,strength=1): if control_net is None: controlnet_path = folder_paths.get_full_path("controlnet", control_net_name) control_net = comfy.controlnet.load_controlnet(controlnet_path) control_hint = image.movedim(-1, 1) positive = pipe["positive"] negative = pipe["negative"] if strength != 0: if negative is None: p = [] for t in positive: n = [t[0], t[1].copy()] c_net = control_net.copy().set_cond_hint(control_hint, strength) if 'control' in t[1]: c_net.set_previous_controlnet(t[1]['control']) n[1]['control'] = c_net n[1]['control_apply_to_uncond'] = True p.append(n) positive = p else: cnets = {} out = [] for conditioning in [positive, negative]: c = [] for t in conditioning: d = t[1].copy() prev_cnet = d.get('control', None) if prev_cnet in cnets: c_net = cnets[prev_cnet] else: c_net = control_net.copy().set_cond_hint(control_hint, strength) c_net.set_previous_controlnet(prev_cnet) cnets[prev_cnet] = c_net d['control'] = c_net d['control_apply_to_uncond'] = False n = [t[0], d] c.append(n) out.append(c) positive = out[0] negative = out[1] new_pipe = { "model": pipe['model'], "positive": positive, "negative": negative, "vae": pipe['vae'], "clip": pipe['clip'], "samples": pipe["samples"], "images": pipe["images"], "seed": 0, "loader_settings": pipe["loader_settings"] } return (new_pipe,) # controlnetADV class controlnetAdvanced: @classmethod def INPUT_TYPES(s): def get_file_list(filenames): return [file for file in filenames if file != "put_models_here.txt" and "lllite" not in file] return { "required": { "pipe": ("PIPE_LINE",), "image": ("IMAGE",), "control_net_name": (get_file_list(folder_paths.get_filename_list("controlnet")),), }, "optional": { "control_net": ("CONTROL_NET",), "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}) } } RETURN_TYPES = ("PIPE_LINE",) RETURN_NAMES = ("pipe",) OUTPUT_NODE = True FUNCTION = "controlnetApply" CATEGORY = "EasyUse/Loaders" def controlnetApply(self, pipe, image, control_net_name, control_net=None, strength=1, start_percent=0, end_percent=1): if control_net is None: controlnet_path = folder_paths.get_full_path("controlnet", control_net_name) control_net = comfy.controlnet.load_controlnet(controlnet_path) control_hint = image.movedim(-1, 1) positive = pipe["positive"] negative = pipe["negative"] if strength != 0: if negative is None: p = [] for t in positive: n = [t[0], t[1].copy()] c_net = control_net.copy().set_cond_hint(control_hint, strength) if 'control' in t[1]: 
c_net.set_previous_controlnet(t[1]['control']) n[1]['control'] = c_net n[1]['control_apply_to_uncond'] = True p.append(n) positive = p else: cnets = {} out = [] for conditioning in [positive, negative]: c = [] for t in conditioning: d = t[1].copy() prev_cnet = d.get('control', None) if prev_cnet in cnets: c_net = cnets[prev_cnet] else: c_net = control_net.copy().set_cond_hint(control_hint, strength, (start_percent, end_percent)) c_net.set_previous_controlnet(prev_cnet) cnets[prev_cnet] = c_net d['control'] = c_net d['control_apply_to_uncond'] = False n = [t[0], d] c.append(n) out.append(c) positive = out[0] negative = out[1] new_pipe = { "model": pipe['model'], "positive": positive, "negative": negative, "vae": pipe['vae'], "clip": pipe['clip'], "samples": pipe["samples"], "images": pipe["images"], "seed": 0, "loader_settings": pipe["loader_settings"] } del pipe return (new_pipe,) #---------------------------------------------------------------预采样 开始----------------------------------------------------------------------# # 预采样设置(基础) class samplerSettings: def __init__(self): pass @classmethod def INPUT_TYPES(cls): return {"required": {"pipe": ("PIPE_LINE",), "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), "scheduler": (comfy.samplers.KSampler.SCHEDULERS,), "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), "seed_num": ("INT", {"default": 0, "min": 0, "max": 1125899906842624}), }, "optional": { "image_to_latent": ("IMAGE",), "latent": ("LATENT",), }, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}, } RETURN_TYPES = ("PIPE_LINE", ) RETURN_NAMES = ("pipe",) OUTPUT_NODE = True FUNCTION = "settings" CATEGORY = "EasyUse/PreSampling" def settings(self, pipe, steps, cfg, sampler_name, scheduler, denoise, seed_num, image_to_latent=None, latent=None, prompt=None, extra_pnginfo=None, my_unique_id=None): # if my_unique_id: # workflow = extra_pnginfo["workflow"] # node = next((x for x in workflow["nodes"] if str(x["id"]) == my_unique_id), None) # if node: # seed_num = prompt[my_unique_id]['inputs']['seed_num'] if 'seed_num' in prompt[my_unique_id][ # 'inputs'] else 0 # length = len(node["widgets_values"]) # node["widgets_values"][length - 2] = seed_num # 图生图转换 vae = pipe["vae"] batch_size = pipe["loader_settings"]["batch_size"] if "batch_size" in pipe["loader_settings"] else 1 if image_to_latent is not None: samples = {"samples": vae.encode(image_to_latent)} samples = RepeatLatentBatch().repeat(samples, batch_size)[0] images = image_to_latent elif latent is not None: samples = RepeatLatentBatch().repeat(latent, batch_size)[0] images = pipe["images"] else: samples = pipe["samples"] images = pipe["images"] new_pipe = { "model": pipe['model'], "positive": pipe['positive'], "negative": pipe['negative'], "vae": pipe['vae'], "clip": pipe['clip'], "samples": samples, "images": images, "seed": seed_num, "loader_settings": { **pipe["loader_settings"], "steps": steps, "cfg": cfg, "sampler_name": sampler_name, "scheduler": scheduler, "denoise": denoise, "add_noise": "enabled" } } del pipe return {"ui": {"value": [seed_num]}, "result": (new_pipe,)} # 预采样设置(高级) class samplerSettingsAdvanced: def __init__(self): pass @classmethod def INPUT_TYPES(cls): return {"required": {"pipe": ("PIPE_LINE",), "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), 
"sampler_name": (comfy.samplers.KSampler.SAMPLERS,), "scheduler": (comfy.samplers.KSampler.SCHEDULERS,), "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}), "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}), "add_noise": (["enable", "disable"],), "seed_num": ("INT", {"default": 0, "min": 0, "max": 1125899906842624}), }, "optional": { "image_to_latent": ("IMAGE",), "latent": ("LATENT",) }, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}, } RETURN_TYPES = ("PIPE_LINE", ) RETURN_NAMES = ("pipe",) OUTPUT_NODE = True FUNCTION = "settings" CATEGORY = "EasyUse/PreSampling" def settings(self, pipe, steps, cfg, sampler_name, scheduler, start_at_step, end_at_step, add_noise, seed_num, image_to_latent=None, latent=None, prompt=None, extra_pnginfo=None, my_unique_id=None): # if my_unique_id: # workflow = extra_pnginfo["workflow"] # node = next((x for x in workflow["nodes"] if str(x["id"]) == my_unique_id), None) # if node: # seed_num = prompt[my_unique_id]['inputs']['seed_num'] if 'seed_num' in prompt[my_unique_id][ # 'inputs'] else 0 # length = len(node["widgets_values"]) # node["widgets_values"][length - 2] = seed_num # 图生图转换 vae = pipe["vae"] batch_size = pipe["loader_settings"]["batch_size"] if "batch_size" in pipe["loader_settings"] else 1 if image_to_latent is not None: samples = {"samples": vae.encode(image_to_latent)} samples = RepeatLatentBatch().repeat(samples, batch_size)[0] images = image_to_latent elif latent is not None: samples = RepeatLatentBatch().repeat(latent, batch_size)[0] images = pipe["images"] else: samples = pipe["samples"] images = pipe["images"] new_pipe = { "model": pipe['model'], "positive": pipe['positive'], "negative": pipe['negative'], "vae": pipe['vae'], "clip": pipe['clip'], "samples": samples, "images": images, "seed": seed_num, "loader_settings": { **pipe["loader_settings"], "steps": steps, "cfg": cfg, "sampler_name": sampler_name, "scheduler": scheduler, "start_step": start_at_step, "last_step": end_at_step, "denoise": 1.0, "add_noise": add_noise } } del pipe return {"ui": {"value": [seed_num]}, "result": (new_pipe,)} # 预采样设置(SDTurbo) class sdTurboSettings: def __init__(self): pass @classmethod def INPUT_TYPES(cls): return {"required": { "pipe": ("PIPE_LINE",), "steps": ("INT", {"default": 1, "min": 1, "max": 10}), "cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}), "sampler_name": (comfy.samplers.SAMPLER_NAMES,), "eta": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01, "round": False}), "s_noise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01, "round": False}), "upscale_ratio": ("FLOAT", {"default": 2.0, "min": 0.0, "max": 16.0, "step": 0.01, "round": False}), "start_step": ("INT", {"default": 5, "min": 0, "max": 1000, "step": 1}), "end_step": ("INT", {"default": 15, "min": 0, "max": 1000, "step": 1}), "upscale_n_step": ("INT", {"default": 3, "min": 0, "max": 1000, "step": 1}), "unsharp_kernel_size": ("INT", {"default": 3, "min": 1, "max": 21, "step": 1}), "unsharp_sigma": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 10.0, "step": 0.01, "round": False}), "unsharp_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.01, "round": False}), "seed_num": ("INT", {"default": 0, "min": 0, "max": 1125899906842624}), }, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}, } RETURN_TYPES = ("PIPE_LINE",) RETURN_NAMES = ("pipe",) OUTPUT_NODE = True FUNCTION = "settings" CATEGORY = 
"EasyUse/PreSampling" def settings(self, pipe, steps, cfg, sampler_name, eta, s_noise, upscale_ratio, start_step, end_step, upscale_n_step, unsharp_kernel_size, unsharp_sigma, unsharp_strength, seed_num, prompt=None, extra_pnginfo=None, my_unique_id=None): model = pipe['model'] # sigma timesteps = torch.flip(torch.arange(1, 11) * 100 - 1, (0,))[:steps] sigmas = model.model.model_sampling.sigma(timesteps) sigmas = torch.cat([sigmas, sigmas.new_zeros([1])]) #sampler sample_function = None extra_options = { "eta": eta, "s_noise": s_noise, "upscale_ratio": upscale_ratio, "start_step": start_step, "end_step": end_step, "upscale_n_step": upscale_n_step, "unsharp_kernel_size": unsharp_kernel_size, "unsharp_sigma": unsharp_sigma, "unsharp_strength": unsharp_strength, } if sampler_name == "euler_ancestral": sample_function = sample_euler_ancestral elif sampler_name == "dpmpp_2s_ancestral":
sample_function = sample_dpmpp_2s_ancestral
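The EasyUse nodes in the record above all communicate through a single "pipe" dictionary (model, positive, negative, vae, clip, samples, images, seed, loader_settings): each node copies the incoming pipe, overrides a few keys, merges new options into loader_settings, and returns the new dict. Below is a minimal, dependency-free sketch of that threading pattern; the function names and placeholder payload strings are invented for illustration and are not the real ComfyUI objects.

# Minimal sketch of the PIPE_LINE threading pattern used by the EasyUse nodes above.
# Payloads are placeholder strings instead of real models, conditionings, and tensors.

def loader_node(ckpt_name, batch_size):
    """Builds the initial pipe, roughly like fullLoader.adv_pipeloader."""
    return {
        "model": f"<model:{ckpt_name}>",
        "positive": "<positive cond>",
        "negative": "<negative cond>",
        "vae": "<vae>",
        "clip": "<clip>",
        "samples": {"samples": f"<latent x{batch_size}>"},
        "images": "<blank image>",
        "seed": 0,
        "loader_settings": {"ckpt_name": ckpt_name, "batch_size": batch_size},
    }

def sampler_settings_node(pipe, steps, cfg, sampler_name, scheduler, denoise, seed_num):
    """Copies the pipe and merges sampling options into loader_settings, like samplerSettings.settings."""
    return {
        **pipe,
        "seed": seed_num,
        "loader_settings": {
            **pipe["loader_settings"],
            "steps": steps,
            "cfg": cfg,
            "sampler_name": sampler_name,
            "scheduler": scheduler,
            "denoise": denoise,
            "add_noise": "enabled",
        },
    }

if __name__ == "__main__":
    pipe = loader_node("dreamshaper_8.safetensors", batch_size=2)
    pipe = sampler_settings_node(pipe, steps=20, cfg=8.0,
                                 sampler_name="euler", scheduler="normal",
                                 denoise=1.0, seed_num=42)
    print(pipe["loader_settings"])

Copying loader_settings with dict unpacking keeps each node side-effect free with respect to the upstream pipe, which is why the original nodes can safely `del pipe` after building the new one.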
9
2023-12-10 07:02:36+00:00
12k
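The controlnetSimple and controlnetAdvanced nodes in the record above share one conditioning-rewrite loop: for each (tensor, options) pair they copy the options dict, attach a ControlNet copy configured with the image hint and strength, chain it to any previously attached ControlNet, and reuse one copy per previous link via a small cache. The sketch below keeps only that flow; ToyControlNet is an invented stand-in, not comfy.controlnet's real class, and the method names on it simply mirror the calls seen in the record.

# Toy sketch of the conditioning/ControlNet chaining logic in controlnetAdvanced above.
class ToyControlNet:
    def __init__(self, name):
        self.name = name
        self.hint = None
        self.strength = None
        self.previous = None
    def copy(self):
        return ToyControlNet(self.name)
    def set_cond_hint(self, hint, strength, percent_range=(0.0, 1.0)):
        self.hint, self.strength, self.range = hint, strength, percent_range
        return self
    def set_previous_controlnet(self, prev):
        self.previous = prev
        return self

def apply_controlnet(positive, negative, control_net, hint, strength, start=0.0, end=1.0):
    cnets, out = {}, []
    for conditioning in (positive, negative):
        c = []
        for cond_tensor, opts in conditioning:
            opts = dict(opts)                      # never mutate the incoming conditioning
            prev = opts.get("control")
            if prev in cnets:                      # reuse one ControlNet copy per previous link
                c_net = cnets[prev]
            else:
                c_net = control_net.copy().set_cond_hint(hint, strength, (start, end))
                c_net.set_previous_controlnet(prev)
                cnets[prev] = c_net
            opts["control"] = c_net
            opts["control_apply_to_uncond"] = False
            c.append([cond_tensor, opts])
        out.append(c)
    return out[0], out[1]

pos = [["<pos-cond>", {}]]
neg = [["<neg-cond>", {}]]
pos, neg = apply_controlnet(pos, neg, ToyControlNet("canny"), hint="<edge map>", strength=1.0)
print(pos[0][1]["control"].name, pos[0][1]["control"].strength)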
Open-All-Scale-Causal-Engine/OpenASCE
openasce/inference/graph_inference.py
[ { "identifier": "CausalGraph", "path": "openasce/discovery/causal_graph.py", "snippet": "class CausalGraph(object):\n \"\"\"Causal Graph Class\n\n Represent the casual graph\n\n \"\"\"\n\n DEFAULT_COLUMN_NAME_PREFIX = \"x\"\n\n def __init__(self, names=[], bn=None, w: np.ndarray = None):\n \"\"\"Constructor\n\n Arguments:\n names: the node names\n bn: basic causal graph\n w: the connection matrix for causal graph\n\n \"\"\"\n self.para = None\n self.parents = {} # {c1:[p1, p2],c2:[p2,p3]....}\n self.names_to_index = {}\n self.index_to_names = {}\n self.n = 0\n self.index_exclude = []\n if bn is not None:\n self.copy(bn)\n else:\n if names:\n self.names_init(names)\n if w is not None:\n if self.names_to_index and self.index_to_names and self.parents:\n pass\n else:\n self.names_init(\n [\n self.DEFAULT_COLUMN_NAME_PREFIX + str(i)\n for i in range(w.shape[0])\n ]\n )\n nz = w.nonzero()\n for _ in map(lambda x: self.add_edge(x[0], x[1]), zip(nz[0], nz[1])):\n pass\n\n def names_init(self, names: List[str]) -> None:\n \"\"\"Initialize the graph with feature names\n\n initialize the names_to_index and index_to_names attributes\n initialize parents[i] = set() (no edges for the moment)\n\n Arguments:\n names (list of string): the names of the nodes\n\n Returns:\n None\n \"\"\"\n tmp_names = copy.deepcopy(names)\n self.names_to_index = {name: index for index, name in enumerate(names)}\n self.index_to_names = {index: name for index, name in enumerate(tmp_names)}\n self.n = len(self.names_to_index)\n for i in range(self.n):\n self.parents[i] = set()\n\n def parents_exclude(self, name_list: List[str]) -> None:\n for name in name_list:\n self.index_exclude.append(self.names_to_index[name])\n\n def random_init(self, max_parents: int = None) -> None:\n \"\"\"Add edges randomly\n\n For each node, pick a random number of the desired number of parents.\n Then, for each candidate, pick another random number. In average,\n the node will have the desired number of parents.\n\n Arguments:\n max_parents: maximal number of one node's parents\n \"\"\"\n max_parents = max_parents if max_parents else self.n - 1\n\n for i in range(self.n):\n nparents = np.random.randint(0, max_parents + 1)\n p = nparents / (self.n + 1.0)\n for j in range(self.n):\n if j != i and np.random.uniform() < p:\n self.add_edge(j, i)\n\n def merge(\n self, g1, g2, p1=1, p2=1, max_parents: int = None, mut_rate: float = 0.0\n ) -> None:\n \"\"\"Pick up edges from both g1 and g2 according to some random policy\n\n Arguments:\n g1 (CausalGraph)\n g1 (CausalGraph)\n p1 (float in [0,1]): proba of an edge in g1 being in self\n p2 (float in [0,1]): proba of an edge in g2 being in self\n p1 + p2 = 1\n max_parents (int)\n\n \"\"\"\n # merge randomly the two graphs\n self.random_merge(g1, g2, p1, p2)\n\n # introduce mutations\n self.mutate(mut_rate)\n\n # remove extra parents\n self.remove_extra_parents(max_parents)\n\n def random_merge(self, g1, g2, p1, p2) -> None:\n \"\"\"Creates graph from edges both in g1 and g2. 
Adds edges according to proba p1 and p2\n\n Arguments:\n g1 (CausalGraph)\n g1 (CausalGraph)\n p1 (float in [0,1]): proba of an edge in g1 being in self\n p2 (float in [0,1]): proba of an edge in g2 being in self\n \"\"\"\n for i, js in g1.parents.items():\n for j in js:\n if np.random.uniform() < p1 or p1 == 1:\n self.add_edge(j, i)\n for i, js in g2.parents.items():\n for j in js:\n if np.random.uniform() < p2 or p2 == 1:\n self.add_edge(j, i)\n\n def mutate(self, mut_rate: float = 0) -> None:\n \"\"\"Introduces new edges with a probability mut_rate\n\n Arguments:\n mut_rate (float in [0,1]): proba of mutation\n \"\"\"\n if mut_rate != 0:\n \"\"\"Do mutation like the following code snippet\n for i in range(self.n):\n for j in range(self.n):\n p = np.random.uniform()\n if p < mut_rate:\n if p < mut_rate / 2:\n self.add_edge(i, j)\n else:\n self.remove_edge(i, j)\n \"\"\"\n for _ in map(\n lambda x: self.add_edge(x[0], x[1])\n if x[2] < 0.25\n else self.remove_edge(x[0], x[1]),\n filter(\n lambda x: x[2] <= 0.5,\n map(\n lambda x: x + (np.random.uniform(),),\n itertools.product(self.n, self.n),\n ),\n ),\n ):\n pass\n\n def remove_extra_parents(self, max_parents: int = None) -> None:\n \"\"\"Removes extra edges if does not respect max parents constraint\n\n Arguments:\n max_parents: the maximal number of the node's parents\n \"\"\"\n if max_parents is not None:\n for i, js in self.parents.items():\n if len(js) > max_parents:\n indices = np.random.permutation(range(len(js)))\n for j in indices[0 : len(js) - max_parents]:\n self.remove_edge(j, i)\n\n def num_save(self, file_name: str) -> None:\n \"\"\"\n Saves the graph in number format\n\n Example\n parent1, child1\n parent2, child2\n\n Arguments:\n file_name: saved file path\n \"\"\"\n with open(file_name, \"w\") as f:\n for child_index, parents in self.parents.items():\n for parent_index in parents:\n f.write(f\"{parent_index},{child_index}\\n\")\n\n def save(self, file_path: str) -> None:\n \"\"\"Saves the graph in the desired format\n\n Example\n parent1, child1\n parent2, child2\n Arguments:\n file_path: saved file path\n \"\"\"\n with open(file_path, \"w\") as f:\n for child_index, parents in self.parents.items():\n for parent_index in parents:\n parent = self.index_to_names.get(parent_index)\n child = self.index_to_names.get(child_index)\n f.write(f\"{parent},{child}\\n\")\n\n def load(self, file_name: str) -> None:\n \"\"\"Loads structure from file. 
See save method\n\n Arguments:\n file_name: the path of the file to be loaded\n \"\"\"\n if not (self.names_to_index and self.index_to_names):\n name_set = set()\n # Go through the file to get all node names\n with open(file_name) as f:\n for line in f:\n line = line.strip().split(\",\")\n if len(line) == 2:\n p = line[0].replace(\"'\", \"\").replace('\"', \"\").strip()\n c = line[1].replace(\"'\", \"\").replace('\"', \"\").strip()\n if p not in name_set:\n name_set.add(p)\n if c not in name_set:\n name_set.add(c)\n self.names_to_index = {name: index for index, name in enumerate(name_set)}\n self.index_to_names = {index: name for index, name in enumerate(name_set)}\n with open(file_name) as f:\n for line in f:\n line = line.strip().split(\",\")\n if len(line) == 2:\n p = line[0].replace(\"'\", \"\").replace('\"', \"\").strip()\n c = line[1].replace(\"'\", \"\").replace('\"', \"\").strip()\n logger.info(f\"p={p}, c={c}\")\n p_index, c_index = self.names_to_index[p], self.names_to_index[c]\n self.add_edge(p_index, c_index)\n\n def is_cyclic(self) -> bool:\n \"\"\"Returns True if a cycle is found else False.\n\n Iterates over the nodes to find all the parents' parents, etc.\n A cycle is found if a node belongs to its own parent's set.\n\n \"\"\"\n all_parents = copy.deepcopy(self.parents)\n update = True\n while update:\n update = False\n for i in range(self.n):\n parents = list(all_parents[i])\n nparents = len(parents)\n for p in parents:\n all_parents[i].update(all_parents[p])\n if nparents != len(all_parents[i]):\n update = True\n if i in all_parents[i]:\n return True\n return False\n\n def copy(self, cg) -> None:\n \"\"\"Copies the structure of cg inside self and erases everything else\n\n Arguments:\n cg (CausalGraph): model\n \"\"\"\n self.index_to_names = copy.deepcopy(cg.index_to_names)\n self.names_to_index = copy.deepcopy(cg.names_to_index)\n self.n = cg.n\n self.parents = copy.deepcopy(cg.parents)\n\n def add_edge(\n self, parent: Union[int, str], child: Union[int, str], max_parents=None\n ) -> bool:\n \"\"\"Adds edge if respects max parents constraint and does not create a cycle\n\n Arguments:\n parent (int): id of parent\n child (int): id of child\n max_parents (int): None means no constraints\n\n Returns\n True if actually added the edge and False means no way to add the edge\n \"\"\"\n parent = self.names_to_index.get(parent) if isinstance(parent, str) else parent\n child = self.names_to_index.get(child) if isinstance(child, str) else child\n if (\n parent is None\n or child is None\n or parent >= self.n\n or child >= self.n\n or parent == child\n ):\n raise ValueError(f\"Error parent or child\")\n if max_parents is not None and len(self.parents[child]) >= max_parents:\n return False\n if child not in self.parents:\n self.parents[child] = set()\n self.parents[child].add(parent)\n if self.is_cyclic():\n logger.debug(\n f\"The edge from {parent} to {child} produces a cycle and be refused\"\n )\n self.remove_edge(parent, child)\n return False\n return True\n\n def remove_edge(self, parent: int, child: int, force: bool = True) -> None:\n try:\n self.parents[child].remove(parent)\n except Exception as e:\n if force:\n logger.debug(f\"Exception happens in remove edge: \\n{e}\")\n else:\n raise e\n\n def score(self, data: np.ndarray, rd: Dict[int, int] = None) -> float:\n \"\"\"Computes bayesian score of the structure given some data assuming uniform prior\n\n Example\n s = cg.score(data)\n\n Arguments:\n data: (nsamples, nfeatures)\n\n Returns\n s (float): bayesian score\n\n \"\"\"\n 
s = 0\n r = rd if rd else self.compute_r(data)\n for i in range(self.n):\n s += self.score_node(i, data, r)\n return s\n\n def compute_r(self, data: np.ndarray) -> dict:\n \"\"\"Compute the number of the value for each node\n\n Arguments:\n data (np array): (nsamples, nfeatures)\n Returns\n r (dict): r[i] = r_i\n \"\"\"\n r = {}\n for i in range(self.n):\n r[i] = np.unique(data[:, i]).shape[0]\n return r\n\n def score_node(self, i, data: np.ndarray, r) -> float:\n \"\"\"Compute the score of node i\n\n Arguments:\n i (int): node\n data (np array): (nsamples, nfeatures)\n r (dict of np array): r[i] = nb possible instances of i\n Returns\n s (float): contribution to log score of node i\n \"\"\"\n m, m0 = Counter(), Counter()\n columns = [i] + list(self.parents.get(i))\n extracted_data = data[:, columns]\n # counting nb of each instance of (node, parents) and (parents)\n for sample in extracted_data:\n m[tuple(sample)] += 1\n m0[tuple(sample[1:])] += 1\n # Adding contribution to the score (assuming uniform prior)\n s: float = 0.0\n \"\"\"Like following code snippet\n for c in m0.values():\n s -= gammaln(r[i] + c)\n s += gammaln(r[i])\n \"\"\"\n stat_i = r[i]\n s -= sum(gammaln(stat_i + c) for c in m0.values())\n s += gammaln(stat_i) * len(m0.values())\n \"\"\"Like following code snippet\n for c in m.values():\n s += gammaln(1 + c)\n \"\"\"\n s += sum(gammaln(1 + c) for c in m.values())\n return s\n\n def calculate_parameter(self, data: np.ndarray, rd: Dict[int, int] = None):\n \"\"\"Calculate the edge weight in the graph\n\n Arguments:\n data: samples\n rd: r[i] = r_i\n \"\"\"\n r = rd if rd else self.compute_r(data)\n node_param = {}\n aux_para_cp = {}\n for i in self.parents.keys():\n if i not in node_param:\n node_param[i] = {}\n if i not in aux_para_cp:\n aux_para_cp[i] = {}\n list_par = [i] + list(self.parents[i])\n data_par = data[:, list_par]\n all_count = 0\n column_list = [self.index_to_names[k] for k in list_par]\n for data_line in data_par:\n tup_k = tuple(data_line)\n if tup_k in aux_para_cp[i].keys():\n aux_para_cp[i][tup_k] += 1\n else:\n aux_para_cp[i][tup_k] = 1\n name = \"\"\n for k in range(len(list_par)):\n name += self.index_to_names[list_par[k]] + \" = {} \".format(\n data_line[k]\n )\n if name in node_param[i].keys():\n node_param[i][name] += 1\n else:\n node_param[i][name] = 1\n all_count += 1\n count = 1\n for k_s in r.keys():\n if k_s in list_par:\n count *= r[k_s]\n for tup_key in node_param[i].keys():\n node_param[i][tup_key] = (1 + node_param[i][tup_key]) / (\n count + all_count\n )\n df_res = []\n for tup_key in aux_para_cp[i].keys():\n aux_para_cp[i][tup_key] = (1 + aux_para_cp[i][tup_key]) / (\n count + all_count\n )\n list_tmp = list(tup_key)\n list_tmp.append(aux_para_cp[i][tup_key])\n df_res.append(list_tmp)\n column_list.append(GraphNodeForm.SCORE_COLUMN_NAME)\n p_ = GraphNodeForm(df_res, columns=column_list)\n node_param[i] = p_\n self.para = node_param\n return self.para" }, { "identifier": "Discovery", "path": "openasce/discovery/discovery.py", "snippet": "class Discovery(Runtime):\n \"\"\"Discovery Class\n\n Base class of the causal discovery\n\n Attributes:\n node_names (List[str]): the name of graph node, which should be set before fit\n\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self._node_names = []\n\n def fit(self, *, X: Union[np.ndarray, Callable], **kwargs) -> None:\n \"\"\"Feed the sample data and search the causal relation on them\n\n Arguments:\n X: Features of the samples.\n\n Returns:\n None\n \"\"\"\n raise 
NotImplementedError(f\"Not implement for abstract class\")\n\n def get_result(self):\n \"\"\"Output the causal graph\n\n Returns:\n None\n \"\"\"\n raise NotImplementedError(f\"Not implement for abstract class\")\n\n @property\n def node_names(self):\n return self._node_names\n\n @node_names.setter\n def node_names(self, value: List[str]):\n self._node_names = value" }, { "identifier": "GraphNodeForm", "path": "openasce/discovery/graph_node_form.py", "snippet": "class GraphNodeForm(object):\n SCORE_COLUMN_NAME = \"node_score_value\"\n\n def __init__(self, input_data: List[List[float]], columns: List[str]) -> None:\n self._columns = copy.deepcopy(columns) # ['col1', 'col2']\n if GraphNodeForm.SCORE_COLUMN_NAME in columns:\n self._data = np.array(input_data, dtype=np.float64) # np.ndarray\n else:\n self._columns.append(GraphNodeForm.SCORE_COLUMN_NAME)\n self._data = np.array(input_data, dtype=np.float64) # np.ndarray\n self._data = np.column_stack((self._data, np.zeros(self._data.shape[0])))\n self._score_column_index = self._columns.index(GraphNodeForm.SCORE_COLUMN_NAME)\n\n @property\n def size(self):\n return len(self._data)\n\n @property\n def columns(self):\n return self._columns\n\n @property\n def data(self):\n return self._data\n\n @property\n def score_column_index(self):\n return self._score_column_index\n\n def index(self, key: str):\n return self._columns.index(key)\n\n def set_flag_zero(self, key: str, value_list: List[int]) -> None:\n \"\"\"set score column to 0 if the value of key column is not in input value_list\n\n Arguments:\n key: the column name\n value_list: the values need to be set\n Returns:\n None\n \"\"\"\n key_index = self._columns.index(key)\n score_column_index = self._score_column_index\n curr_data = self._data\n for i, row in enumerate(curr_data):\n if int(row[key_index]) not in value_list:\n curr_data[i, score_column_index] = 0\n\n def set_norm(self) -> None:\n \"\"\"normalize the value of score column\"\"\"\n score_column_index = self._score_column_index\n curr_data = self._data\n prob_sum = (\n curr_data[:, score_column_index].sum() + 0.00000001\n ) # avoid zero as divisor\n for row in curr_data:\n row[score_column_index] /= prob_sum\n\n def multiply_score_column(self, key: str, ext) -> None:\n \"\"\"multiply ext's score column to local score column for same key column's value\n\n Arguments:\n key: the column name\n ext (GraphNodeForm): another GraphNodeForm\n Returns:\n None\n \"\"\"\n key_index = self._columns.index(key)\n curr_data = self._data\n score_column_index = self._score_column_index\n external_key_index = ext._columns.index(key)\n external_data = ext._data\n ext_score_column_index = ext._score_column_index\n for row in curr_data:\n for ext_row in external_data:\n if row[key_index] == ext_row[external_key_index]:\n row[score_column_index] *= ext_row[ext_score_column_index]\n\n def sort_by_column(self, key: str) -> None:\n \"\"\"sort specified column\n\n Arguments:\n key: the column name\n\n Returns:\n None\n \"\"\"\n key_index = self._columns.index(key)\n curr_data = self._data\n self._data = np.array(sorted(curr_data, key=lambda x: x[key_index]))\n\n def get_score_deviation(self, addition):\n \"\"\"multiply ext's score column to local score column for same key column's value\n\n Arguments:\n addition: Another GraphNodeForm used to calculate the deviation\n Returns:\n Calculation result\n \"\"\"\n curr_data = self._data\n score_column_index = self._score_column_index\n external_data = addition.data\n ext_score_column_index = 
addition._score_column_index\n t = np.abs(\n curr_data[:, score_column_index : score_column_index + 1]\n - external_data[:, ext_score_column_index : ext_score_column_index + 1]\n )\n return t.sum()\n\n def get_score_value(self, target_key: str, target_value: int):\n \"\"\"multiply ext's score column to local score column for same key column's value\n\n Arguments:\n target_key: the column name\n target_value: the column value\n\n Returns:\n\n \"\"\"\n key_index = self._columns.index(target_key)\n curr_data = self._data\n score_column_index = self._score_column_index\n for row in curr_data:\n if int(row[key_index]) == target_value:\n return row[score_column_index]\n raise ValueError(f\"Not target value exists\")\n\n def set_groupby_sum(self, key: str):\n \"\"\"multiply ext's score column to local score column for same key column's value\n\n Arguments:\n key: the column name\n\n Returns:\n\n \"\"\"\n key_index = self._columns.index(key)\n curr_data = self._data\n score_column_index = self._score_column_index\n ac = {}\n for row in curr_data:\n if int(row[key_index]) in ac:\n ac[int(row[key_index])] += row[score_column_index]\n else:\n ac[int(row[key_index])] = row[score_column_index]\n result_data = np.zeros(shape=(len(ac), 2), dtype=np.float64)\n line_num = 0\n for k1, value in ac.items():\n result_data[line_num] = np.array([k1, value], dtype=np.float64)\n line_num += 1\n self._data = result_data\n self._columns = [key, GraphNodeForm.SCORE_COLUMN_NAME]\n self._score_column_index = self._columns.index(GraphNodeForm.SCORE_COLUMN_NAME)\n\n def __str__(self):\n np.set_printoptions(threshold=5000, suppress=True)\n return self.columns.__str__() + \"\\n\" + self._data.__str__() + \"\\n\"" }, { "identifier": "InferenceModel", "path": "openasce/inference/inference_model.py", "snippet": "class InferenceModel(Runtime):\n \"\"\"Inference Class\n\n Base class of the causal inference\n\n Attributes:\n\n \"\"\"\n\n CONDITION_DICT_NAME = \"condition\"\n TREATMENT_VALUE = \"treatment_value\"\n LABEL_VALUE = \"label_value\"\n\n def __init__(self) -> None:\n super().__init__()\n\n @property\n def data(self):\n \"\"\"Return the sample data\"\"\"\n raise NotImplementedError(f\"Not implement for abstract class\")\n\n def fit(\n self,\n *,\n X: Iterable[np.ndarray],\n Y: Iterable[np.ndarray],\n T: Iterable[np.ndarray],\n **kwargs,\n ) -> None:\n \"\"\"Feed the sample data and train the model used to effect on the samples.\n\n Arguments:\n X: Features of the samples.\n Y: Outcomes of the samples.\n T: Treatments of the samples.\n\n Returns:\n None\n \"\"\"\n pass\n\n def estimate(\n self,\n *,\n X: Iterable[np.ndarray],\n T: Iterable[np.ndarray],\n **kwargs,\n ) -> None:\n \"\"\"Feed the sample data and estimate the effect on the samples\n\n Arguments:\n X: Features of the samples.\n T: Treatments of the samples.\n\n Returns:\n None\n \"\"\"\n pass\n\n def get_result(self) -> Any:\n \"\"\"Get the estimated result\n\n The sub-class should implement this routine and runtime invokes it.\n\n Returns:\n The estimation result.\n \"\"\"\n return self._estimate_result\n\n def output(self, output_path: str) -> None:\n \"\"\"Output the estimated result to files\n\n The sub-class should implement this routine and runtime invokes it.\n\n Arguments:\n output_path: The path of output file.\n\n Returns:\n None\n \"\"\"\n from numpy import savetxt\n\n savetxt(output_path, self.get_result())\n logger.info(f\"Write result to file: {output_path}\")\n\n def _wrap_fit(m):\n @wraps(m)\n def call(self, *, X, Y, T, **kwargs):\n 
self._prefit(Y, T, X=X, **kwargs)\n # call the wrapped fit method\n m(self, X=X, Y=Y, T=T, **kwargs)\n self._postfit(Y, T, X=X, **kwargs)\n return self\n\n return call" }, { "identifier": "logger", "path": "openasce/utils/logger.py", "snippet": "GLOBAL_LOGGER_NAME = \"openasce-log\"\nDEFAULT_FORMAT = (\n \"[%(asctime)s] [%(levelname)s] [%(filename)s:%(lineno)d:%(funcName)s] %(message)s\"\n)\nDEFAULT_FORMATTER = logging.Formatter(DEFAULT_FORMAT)\ndef init_custom_logger(name):\nclass openasceLogger(object):" } ]
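The CausalGraph snippet quoted in the context list above refuses any edge that would close a cycle: add_edge tentatively adds the parent, runs is_cyclic, and rolls back if a loop appears. is_cyclic works by repeatedly expanding each node's ancestor set with its parents' ancestors until a fixed point, declaring a cycle if a node ends up among its own ancestors. Below is a self-contained re-implementation of that check; the integer node ids and the two example graphs are invented for illustration.

# Standalone sketch of the cycle test used by CausalGraph.is_cyclic above.
import copy

def is_cyclic(parents):
    """parents: dict mapping node -> set of parent nodes (edge parent -> node)."""
    ancestors = copy.deepcopy(parents)
    changed = True
    while changed:
        changed = False
        for node in ancestors:
            before = len(ancestors[node])
            for p in list(ancestors[node]):        # snapshot; new ancestors handled next pass
                ancestors[node] |= ancestors.get(p, set())
            if len(ancestors[node]) != before:
                changed = True
            if node in ancestors[node]:            # a node that is its own ancestor closes a loop
                return True
    return False

print(is_cyclic({0: {1}, 1: {2}, 2: set()}))   # False: 2 -> 1 -> 0 is acyclic
print(is_cyclic({0: {1}, 1: {2}, 2: {0}}))     # True: 0 -> 2 -> 1 -> 0 closes a loop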
import copy
import numpy as np
from functools import reduce
from typing import Dict, Iterable, List
from openasce.discovery.causal_graph import CausalGraph
from openasce.discovery.discovery import Discovery
from openasce.discovery.graph_node_form import GraphNodeForm
from openasce.inference.inference_model import InferenceModel
from openasce.utils.logger import logger
7,237
# Copyright 2023 AntGroup CO., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. class GraphInferModel(InferenceModel): """The inference using the causal graph Attributes: graph: The causal graph. If not set, the class will try to find it out if discovery is available. column_names: all names of sample treatment_name: treatment column name in column_names label_name: target column name in column_names """ def __init__( self, *, graph: CausalGraph = None, column_names: List[str] = None, treatment_name: str = None, label_name: str = None, num_iteration=20, ) -> None: """ Arguments: graph: causal graph column_names: all names of column treatment_name: the name of treatment column label_name: the name of target name """ super().__init__() self._graph = graph self._column_names = column_names self._treatment_name = treatment_name self._label_name = label_name self._discovery = None self._data = None self._num_iteration = num_iteration self._label_value = None @property def data(self): assert self._data is not None, f"Must have sample data." return self._data @property def graph(self): assert self._graph is not None, "The graph object should be set" return self._graph @graph.setter def graph(self, value): assert self._graph is None, "The graph object should be set once only" self._graph = value # graph is available, set the column names using graph columns self.column_names = list(self.graph.names_to_index.keys()) @property def column_names(self): """All nodes' name. Note: should include the treatment node and label node. """ assert self._column_names is not None, "The column names should be set" return self._column_names @column_names.setter def column_names(self, value: List[str]): assert self._column_names is None, "The column names should be set once only" self._column_names = value @property def treatment_name(self): assert self._treatment_name is not None, "The treatment name should be set" return self._treatment_name @treatment_name.setter def treatment_name(self, value: str): assert ( self._treatment_name is None ), "The treatment name should be set once only" self._treatment_name = value @property def label_name(self): assert self._label_name is not None, "The label name should be set" return self._label_name @label_name.setter def label_name(self, value: str): assert self._label_name is None, "The label name should be set once only" self._label_name = value @property
# Copyright 2023 AntGroup CO., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. class GraphInferModel(InferenceModel): """The inference using the causal graph Attributes: graph: The causal graph. If not set, the class will try to find it out if discovery is available. column_names: all names of sample treatment_name: treatment column name in column_names label_name: target column name in column_names """ def __init__( self, *, graph: CausalGraph = None, column_names: List[str] = None, treatment_name: str = None, label_name: str = None, num_iteration=20, ) -> None: """ Arguments: graph: causal graph column_names: all names of column treatment_name: the name of treatment column label_name: the name of target name """ super().__init__() self._graph = graph self._column_names = column_names self._treatment_name = treatment_name self._label_name = label_name self._discovery = None self._data = None self._num_iteration = num_iteration self._label_value = None @property def data(self): assert self._data is not None, f"Must have sample data." return self._data @property def graph(self): assert self._graph is not None, "The graph object should be set" return self._graph @graph.setter def graph(self, value): assert self._graph is None, "The graph object should be set once only" self._graph = value # graph is available, set the column names using graph columns self.column_names = list(self.graph.names_to_index.keys()) @property def column_names(self): """All nodes' name. Note: should include the treatment node and label node. """ assert self._column_names is not None, "The column names should be set" return self._column_names @column_names.setter def column_names(self, value: List[str]): assert self._column_names is None, "The column names should be set once only" self._column_names = value @property def treatment_name(self): assert self._treatment_name is not None, "The treatment name should be set" return self._treatment_name @treatment_name.setter def treatment_name(self, value: str): assert ( self._treatment_name is None ), "The treatment name should be set once only" self._treatment_name = value @property def label_name(self): assert self._label_name is not None, "The label name should be set" return self._label_name @label_name.setter def label_name(self, value: str): assert self._label_name is None, "The label name should be set once only" self._label_name = value @property
def discovery(self) -> Discovery:
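GraphInferModel in the record above guards its configuration with "set once only" properties: every getter asserts the value has been provided, every setter asserts it has not been assigned before, and the graph setter derives column_names from the graph's node names. A minimal standalone sketch of that pattern follows; ToyGraph and ToyInferModel are invented stand-ins that model only the names_to_index attribute used by the setter.

# Minimal sketch of the "set once only" property pattern used by GraphInferModel above.
class ToyGraph:
    def __init__(self, names):
        self.names_to_index = {name: i for i, name in enumerate(names)}

class ToyInferModel:
    def __init__(self):
        self._graph = None
        self._column_names = None

    @property
    def graph(self):
        assert self._graph is not None, "The graph object should be set"
        return self._graph

    @graph.setter
    def graph(self, value):
        assert self._graph is None, "The graph object should be set once only"
        self._graph = value
        # once the graph is known, the column names follow from its nodes
        self.column_names = list(value.names_to_index.keys())

    @property
    def column_names(self):
        assert self._column_names is not None, "The column names should be set"
        return self._column_names

    @column_names.setter
    def column_names(self, value):
        assert self._column_names is None, "The column names should be set once only"
        self._column_names = value

m = ToyInferModel()
m.graph = ToyGraph(["treatment", "x1", "label"])
print(m.column_names)          # ['treatment', 'x1', 'label']
# m.graph = ToyGraph(["x2"])   # would raise AssertionError: set once only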
1
2023-12-06 05:54:36+00:00
12k
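The CausalGraph context of the record above scores a candidate structure with a Bayesian score under a uniform prior: for each node it counts the occurrences of every (node, parents) configuration and every parents-only configuration, then combines the counts through gammaln. The snippet below re-derives that per-node computation as a standalone function matching score_node from the quoted context; the toy data array and parent choice are invented for illustration.

# Standalone sketch of CausalGraph.score_node from the record above: Bayesian score
# contribution of one node under a uniform prior.
from collections import Counter
import numpy as np
from scipy.special import gammaln

def score_node(i, parents_of_i, data):
    r_i = np.unique(data[:, i]).shape[0]          # number of distinct values node i takes
    cols = [i] + list(parents_of_i)
    m, m0 = Counter(), Counter()
    for sample in data[:, cols]:
        m[tuple(sample)] += 1                     # counts of (node, parents) configurations
        m0[tuple(sample[1:])] += 1                # counts of parent configurations
    s = 0.0
    s -= sum(gammaln(r_i + c) for c in m0.values())
    s += gammaln(r_i) * len(m0)
    s += sum(gammaln(1 + c) for c in m.values())
    return s

data = np.array([[0, 0], [0, 0], [1, 1], [1, 0]])  # columns: node 0, node 1
print(score_node(0, parents_of_i=[1], data=data))  # score of node 0 with parent {1}

Summing this quantity over all nodes reproduces CausalGraph.score, which the discovery search uses to compare candidate graphs.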
eclipse-t2i/eclipse-inference
main.py
[ { "identifier": "PriorTransformer", "path": "src/priors/prior_transformer.py", "snippet": "class PriorTransformer(ModelMixin, ConfigMixin):\n \"\"\"\n A Prior Transformer model.\n\n Parameters:\n num_attention_heads (`int`, *optional*, defaults to 32): The number of heads to use for multi-head attention.\n attention_head_dim (`int`, *optional*, defaults to 64): The number of channels in each head.\n num_layers (`int`, *optional*, defaults to 20): The number of layers of Transformer blocks to use.\n embedding_dim (`int`, *optional*, defaults to 768): The dimension of the model input `hidden_states`\n num_embeddings (`int`, *optional*, defaults to 77):\n The number of embeddings of the model input `hidden_states`\n additional_embeddings (`int`, *optional*, defaults to 4): The number of additional tokens appended to the\n projected `hidden_states`. The actual length of the used `hidden_states` is `num_embeddings +\n additional_embeddings`.\n dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.\n time_embed_act_fn (`str`, *optional*, defaults to 'silu'):\n The activation function to use to create timestep embeddings.\n norm_in_type (`str`, *optional*, defaults to None): The normalization layer to apply on hidden states before\n passing to Transformer blocks. Set it to `None` if normalization is not needed.\n embedding_proj_norm_type (`str`, *optional*, defaults to None):\n The normalization layer to apply on the input `proj_embedding`. Set it to `None` if normalization is not\n needed.\n encoder_hid_proj_type (`str`, *optional*, defaults to `linear`):\n The projection layer to apply on the input `encoder_hidden_states`. Set it to `None` if\n `encoder_hidden_states` is `None`.\n added_emb_type (`str`, *optional*, defaults to `prd`): Additional embeddings to condition the model.\n Choose from `prd` or `None`. if choose `prd`, it will prepend a token indicating the (quantized) dot\n product between the text embedding and image embedding as proposed in the unclip paper\n https://arxiv.org/abs/2204.06125 If it is `None`, no additional embeddings will be prepended.\n time_embed_dim (`int, *optional*, defaults to None): The dimension of timestep embeddings.\n If None, will be set to `num_attention_heads * attention_head_dim`\n embedding_proj_dim (`int`, *optional*, default to None):\n The dimension of `proj_embedding`. If None, will be set to `embedding_dim`.\n clip_embed_dim (`int`, *optional*, default to None):\n The dimension of the output. 
If None, will be set to `embedding_dim`.\n \"\"\"\n\n @register_to_config\n def __init__(\n self,\n num_attention_heads: int = 32,\n attention_head_dim: int = 64,\n num_layers: int = 20,\n embedding_dim: int = 768,\n num_embeddings=77,\n additional_embeddings=3, # as we have remvoed the time embedding\n dropout: float = 0.0,\n # time_embed_act_fn: str = \"silu\",\n norm_in_type: Optional[str] = None, # layer\n embedding_proj_norm_type: Optional[str] = None, # layer\n encoder_hid_proj_type: Optional[str] = \"linear\", # linear\n added_emb_type: Optional[str] = \"prd\", # prd\n # time_embed_dim: Optional[int] = None,\n embedding_proj_dim: Optional[int] = None,\n clip_embed_dim: Optional[int] = None,\n ):\n super().__init__()\n self.num_attention_heads = num_attention_heads\n self.attention_head_dim = attention_head_dim\n inner_dim = num_attention_heads * attention_head_dim\n self.additional_embeddings = additional_embeddings\n\n # time_embed_dim = time_embed_dim or inner_dim\n embedding_proj_dim = embedding_proj_dim or embedding_dim\n clip_embed_dim = clip_embed_dim or embedding_dim\n\n # self.time_proj = Timesteps(inner_dim, True, 0)\n # self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)\n\n self.proj_in = nn.Linear(embedding_dim, inner_dim)\n\n if embedding_proj_norm_type is None:\n self.embedding_proj_norm = None\n elif embedding_proj_norm_type == \"layer\":\n self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)\n else:\n raise ValueError(f\"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}\")\n\n self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)\n\n if encoder_hid_proj_type is None:\n self.encoder_hidden_states_proj = None\n elif encoder_hid_proj_type == \"linear\":\n self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)\n else:\n raise ValueError(f\"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}\")\n\n self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))\n\n if added_emb_type == \"prd\":\n self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))\n elif added_emb_type is None:\n self.prd_embedding = None\n else:\n raise ValueError(\n f\"`added_emb_type`: {added_emb_type} is not supported. 
Make sure to choose one of `'prd'` or `None`.\"\n )\n\n self.transformer_blocks = nn.ModuleList(\n [\n BasicTransformerBlock(\n inner_dim,\n num_attention_heads,\n attention_head_dim,\n dropout=dropout,\n activation_fn=\"gelu\",\n attention_bias=True,\n )\n for d in range(num_layers)\n ]\n )\n\n if norm_in_type == \"layer\":\n self.norm_in = nn.LayerNorm(inner_dim)\n elif norm_in_type is None:\n self.norm_in = None\n else:\n raise ValueError(f\"Unsupported norm_in_type: {norm_in_type}.\")\n\n self.norm_out = nn.LayerNorm(inner_dim)\n\n self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)\n\n causal_attention_mask = torch.full(\n [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0\n )\n causal_attention_mask.triu_(1)\n causal_attention_mask = causal_attention_mask[None, ...]\n self.register_buffer(\"causal_attention_mask\", causal_attention_mask, persistent=False)\n\n self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))\n self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))\n\n @property\n # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors\n def attn_processors(self) -> Dict[str, AttentionProcessor]:\n r\"\"\"\n Returns:\n `dict` of attention processors: A dictionary containing all attention processors used in the model with\n indexed by its weight name.\n \"\"\"\n # set recursively\n processors = {}\n\n def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):\n if hasattr(module, \"set_processor\"):\n processors[f\"{name}.processor\"] = module.processor\n\n for sub_name, child in module.named_children():\n fn_recursive_add_processors(f\"{name}.{sub_name}\", child, processors)\n\n return processors\n\n for name, module in self.named_children():\n fn_recursive_add_processors(name, module, processors)\n\n return processors\n\n # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor\n def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):\n r\"\"\"\n Sets the attention processor to use to compute attention.\n\n Parameters:\n processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):\n The instantiated processor class or a dictionary of processor classes that will be set as the processor\n for **all** `Attention` layers.\n\n If `processor` is a dict, the key needs to define the path to the corresponding cross attention\n processor. This is strongly recommended when setting trainable attention processors.\n\n \"\"\"\n count = len(self.attn_processors.keys())\n\n if isinstance(processor, dict) and len(processor) != count:\n raise ValueError(\n f\"A dict of processors was passed, but the number of processors {len(processor)} does not match the\"\n f\" number of attention layers: {count}. 
Please make sure to pass {count} processor classes.\"\n )\n\n def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):\n if hasattr(module, \"set_processor\"):\n if not isinstance(processor, dict):\n module.set_processor(processor)\n else:\n module.set_processor(processor.pop(f\"{name}.processor\"))\n\n for sub_name, child in module.named_children():\n fn_recursive_attn_processor(f\"{name}.{sub_name}\", child, processor)\n\n for name, module in self.named_children():\n fn_recursive_attn_processor(name, module, processor)\n\n # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor\n def set_default_attn_processor(self):\n \"\"\"\n Disables custom attention processors and sets the default attention implementation.\n \"\"\"\n self.set_attn_processor(AttnProcessor())\n\n def forward(\n self,\n hidden_states,\n # timestep: Union[torch.Tensor, float, int],\n proj_embedding: torch.FloatTensor,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.BoolTensor] = None,\n return_dict: bool = True,\n ):\n \"\"\"\n The [`PriorTransformer`] forward method.\n\n Args:\n hidden_states (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`):\n The currently predicted image embeddings.\n timestep (`torch.LongTensor`):\n Current denoising step.\n proj_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`):\n Projected embedding vector the denoising process is conditioned on.\n encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_embeddings, embedding_dim)`):\n Hidden states of the text embeddings the denoising process is conditioned on.\n attention_mask (`torch.BoolTensor` of shape `(batch_size, num_embeddings)`):\n Text mask for the text embeddings.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~models.prior_transformer.PriorTransformerOutput`] instead of a plain\n tuple.\n\n Returns:\n [`~models.prior_transformer.PriorTransformerOutput`] or `tuple`:\n If return_dict is True, a [`~models.prior_transformer.PriorTransformerOutput`] is returned, otherwise a\n tuple is returned where the first element is the sample tensor.\n \"\"\"\n batch_size = hidden_states.shape[0]\n\n # timesteps = timestep\n # if not torch.is_tensor(timesteps):\n # timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)\n # elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:\n # timesteps = timesteps[None].to(hidden_states.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n # timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)\n\n # timesteps_projected = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might be fp16, so we need to cast here.\n # timesteps_projected = timesteps_projected.to(dtype=self.dtype)\n # time_embeddings = self.time_embedding(timesteps_projected)\n\n if self.embedding_proj_norm is not None:\n proj_embedding = self.embedding_proj_norm(proj_embedding)\n\n proj_embeddings = self.embedding_proj(proj_embedding)\n if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:\n encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)\n elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:\n raise ValueError(\"`encoder_hidden_states_proj` requires `encoder_hidden_states` to 
be set\")\n\n hidden_states = self.proj_in(hidden_states)\n\n positional_embeddings = self.positional_embedding.to(hidden_states.dtype)\n\n additional_embeds = []\n additional_embeddings_len = 0\n\n if encoder_hidden_states is not None:\n additional_embeds.append(encoder_hidden_states)\n additional_embeddings_len += encoder_hidden_states.shape[1]\n\n if len(proj_embeddings.shape) == 2:\n proj_embeddings = proj_embeddings[:, None, :]\n\n if len(hidden_states.shape) == 2:\n hidden_states = hidden_states[:, None, :]\n\n additional_embeds = additional_embeds + [\n proj_embeddings,\n # time_embeddings[:, None, :],\n hidden_states,\n ]\n\n if self.prd_embedding is not None:\n prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)\n additional_embeds.append(prd_embedding)\n\n hidden_states = torch.cat(\n additional_embeds,\n dim=1,\n )\n\n # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens\n additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1\n if positional_embeddings.shape[1] < hidden_states.shape[1]:\n positional_embeddings = F.pad(\n positional_embeddings,\n (\n 0,\n 0,\n additional_embeddings_len,\n self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,\n ),\n value=0.0,\n )\n\n hidden_states = hidden_states + positional_embeddings\n\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0\n attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)\n attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)\n attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)\n\n if self.norm_in is not None:\n hidden_states = self.norm_in(hidden_states)\n\n for block in self.transformer_blocks:\n hidden_states = block(hidden_states, attention_mask=attention_mask)\n\n hidden_states = self.norm_out(hidden_states)\n\n if self.prd_embedding is not None:\n hidden_states = hidden_states[:, -1]\n else:\n hidden_states = hidden_states[:, additional_embeddings_len:]\n\n predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)\n\n if not return_dict:\n return (predicted_image_embedding,)\n\n return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)\n\n def post_process_latents(self, prior_latents):\n prior_latents = (prior_latents * self.clip_std) + self.clip_mean\n return prior_latents" }, { "identifier": "KandinskyPriorPipeline", "path": "src/pipelines/pipeline_kandinsky_prior.py", "snippet": "class KandinskyPriorPipeline(DiffusionPipeline):\n \"\"\"\n Pipeline for generating image prior for Kandinsky\n\n This model inherits from [`DiffusionPipeline`]. 
Check the superclass documentation for the generic methods the\n library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)\n\n Args:\n prior ([`PriorTransformer`]):\n The canonincal unCLIP prior to approximate the image embedding from the text embedding.\n image_encoder ([`CLIPVisionModelWithProjection`]):\n Frozen image-encoder.\n text_encoder ([`CLIPTextModelWithProjection`]):\n Frozen text-encoder.\n tokenizer (`CLIPTokenizer`):\n Tokenizer of class\n [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).\n scheduler ([`UnCLIPScheduler`]):\n A scheduler to be used in combination with `prior` to generate image embedding.\n \"\"\"\n\n _exclude_from_cpu_offload = [\"prior\"]\n\n def __init__(\n self,\n prior: PriorTransformer,\n image_encoder: CLIPVisionModelWithProjection,\n text_encoder: CLIPTextModelWithProjection,\n tokenizer: CLIPTokenizer,\n scheduler: UnCLIPScheduler,\n image_processor: CLIPImageProcessor,\n ):\n super().__init__()\n\n self.register_modules(\n prior=prior,\n text_encoder=text_encoder,\n tokenizer=tokenizer,\n scheduler=scheduler,\n image_encoder=image_encoder,\n image_processor=image_processor,\n )\n\n @torch.no_grad()\n @replace_example_docstring(EXAMPLE_INTERPOLATE_DOC_STRING)\n def interpolate(\n self,\n images_and_prompts: List[Union[str, PIL.Image.Image, torch.FloatTensor]],\n weights: List[float],\n num_images_per_prompt: int = 1,\n num_inference_steps: int = 25,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n negative_prior_prompt: Optional[str] = None,\n negative_prompt: str = \"\",\n guidance_scale: float = 4.0,\n device=None,\n ):\n \"\"\"\n Function invoked when using the prior pipeline for interpolation.\n\n Args:\n images_and_prompts (`List[Union[str, PIL.Image.Image, torch.FloatTensor]]`):\n list of prompts and images to guide the image generation.\n weights: (`List[float]`):\n list of weights for each condition in `images_and_prompts`\n num_images_per_prompt (`int`, *optional*, defaults to 1):\n The number of images to generate per prompt.\n num_inference_steps (`int`, *optional*, defaults to 25):\n The number of denoising steps. More denoising steps usually lead to a higher quality image at the\n expense of slower inference.\n generator (`torch.Generator` or `List[torch.Generator]`, *optional*):\n One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)\n to make generation deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor will ge generated by sampling using the supplied random `generator`.\n negative_prior_prompt (`str`, *optional*):\n The prompt not to guide the prior diffusion process. Ignored when not using guidance (i.e., ignored if\n `guidance_scale` is less than `1`).\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt not to guide the image generation. Ignored when not using guidance (i.e., ignored if\n `guidance_scale` is less than `1`).\n guidance_scale (`float`, *optional*, defaults to 4.0):\n Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).\n `guidance_scale` is defined as `w` of equation 2. 
of [Imagen\n Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >\n 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,\n usually at the expense of lower image quality.\n\n Examples:\n\n Returns:\n [`KandinskyPriorPipelineOutput`] or `tuple`\n \"\"\"\n\n device = device or self.device\n\n if len(images_and_prompts) != len(weights):\n raise ValueError(\n f\"`images_and_prompts` contains {len(images_and_prompts)} items and `weights` contains {len(weights)} items - they should be lists of same length\"\n )\n\n image_embeddings = []\n for cond, weight in zip(images_and_prompts, weights):\n if isinstance(cond, str):\n image_emb = self(\n cond,\n num_inference_steps=num_inference_steps,\n num_images_per_prompt=num_images_per_prompt,\n generator=generator,\n latents=latents,\n negative_prompt=negative_prior_prompt,\n guidance_scale=guidance_scale,\n ).image_embeds\n\n elif isinstance(cond, (PIL.Image.Image, torch.Tensor)):\n if isinstance(cond, PIL.Image.Image):\n cond = (\n self.image_processor(cond, return_tensors=\"pt\")\n .pixel_values[0]\n .unsqueeze(0)\n .to(dtype=self.image_encoder.dtype, device=device)\n )\n\n image_emb = self.image_encoder(cond)[\"image_embeds\"]\n\n else:\n raise ValueError(\n f\"`images_and_prompts` can only contains elements to be of type `str`, `PIL.Image.Image` or `torch.Tensor` but is {type(cond)}\"\n )\n\n image_embeddings.append(image_emb * weight)\n\n image_emb = torch.cat(image_embeddings).sum(dim=0, keepdim=True)\n\n out_zero = self(\n negative_prompt,\n num_inference_steps=num_inference_steps,\n num_images_per_prompt=num_images_per_prompt,\n generator=generator,\n latents=latents,\n negative_prompt=negative_prior_prompt,\n guidance_scale=guidance_scale,\n )\n zero_image_emb = (\n out_zero.negative_image_embeds\n if negative_prompt == \"\"\n else out_zero.image_embeds\n )\n\n return KandinskyPriorPipelineOutput(\n image_embeds=image_emb, negative_image_embeds=zero_image_emb\n )\n\n # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents\n def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):\n if latents is None:\n latents = randn_tensor(\n shape, generator=generator, device=device, dtype=dtype\n )\n else:\n if latents.shape != shape:\n raise ValueError(\n f\"Unexpected latents shape, got {latents.shape}, expected {shape}\"\n )\n latents = latents.to(device)\n\n latents = latents * scheduler.init_noise_sigma\n return latents\n\n def get_zero_embed(self, batch_size=1, device=None):\n device = device or self.device\n zero_img = torch.zeros(\n 1,\n 3,\n self.image_encoder.config.image_size,\n self.image_encoder.config.image_size,\n ).to(device=device, dtype=self.image_encoder.dtype)\n zero_image_emb = self.image_encoder(zero_img)[\"image_embeds\"]\n zero_image_emb = zero_image_emb.repeat(batch_size, 1)\n return zero_image_emb\n\n def _encode_prompt(\n self,\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt=None,\n ):\n batch_size = len(prompt) if isinstance(prompt, list) else 1\n # get prompt text embeddings\n text_inputs = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n text_input_ids = text_inputs.input_ids\n text_mask = text_inputs.attention_mask.bool().to(device)\n\n untruncated_ids = self.tokenizer(\n prompt, padding=\"longest\", return_tensors=\"pt\"\n 
).input_ids\n\n if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(\n text_input_ids, untruncated_ids\n ):\n removed_text = self.tokenizer.batch_decode(\n untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]\n )\n logger.warning(\n \"The following part of your input was truncated because CLIP can only handle sequences up to\"\n f\" {self.tokenizer.model_max_length} tokens: {removed_text}\"\n )\n text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]\n\n text_encoder_output = self.text_encoder(text_input_ids.to(device))\n\n prompt_embeds = text_encoder_output.text_embeds\n text_encoder_hidden_states = text_encoder_output.last_hidden_state\n\n prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)\n text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(\n num_images_per_prompt, dim=0\n )\n text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)\n\n if do_classifier_free_guidance:\n uncond_tokens: List[str]\n if negative_prompt is None:\n uncond_tokens = [\"\"] * batch_size\n elif type(prompt) is not type(negative_prompt):\n raise TypeError(\n f\"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=\"\n f\" {type(prompt)}.\"\n )\n elif isinstance(negative_prompt, str):\n uncond_tokens = [negative_prompt]\n elif batch_size != len(negative_prompt):\n raise ValueError(\n f\"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:\"\n f\" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches\"\n \" the batch size of `prompt`.\"\n )\n else:\n uncond_tokens = negative_prompt\n\n uncond_input = self.tokenizer(\n uncond_tokens,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n uncond_text_mask = uncond_input.attention_mask.bool().to(device)\n negative_prompt_embeds_text_encoder_output = self.text_encoder(\n uncond_input.input_ids.to(device)\n )\n\n negative_prompt_embeds = (\n negative_prompt_embeds_text_encoder_output.text_embeds\n )\n uncond_text_encoder_hidden_states = (\n negative_prompt_embeds_text_encoder_output.last_hidden_state\n )\n\n # duplicate unconditional embeddings for each generation per prompt, using mps friendly method\n\n seq_len = negative_prompt_embeds.shape[1]\n negative_prompt_embeds = negative_prompt_embeds.repeat(\n 1, num_images_per_prompt\n )\n negative_prompt_embeds = negative_prompt_embeds.view(\n batch_size * num_images_per_prompt, seq_len\n )\n\n seq_len = uncond_text_encoder_hidden_states.shape[1]\n uncond_text_encoder_hidden_states = (\n uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)\n )\n uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(\n batch_size * num_images_per_prompt, seq_len, -1\n )\n uncond_text_mask = uncond_text_mask.repeat_interleave(\n num_images_per_prompt, dim=0\n )\n\n # done duplicates\n\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])\n text_encoder_hidden_states = torch.cat(\n [uncond_text_encoder_hidden_states, text_encoder_hidden_states]\n )\n\n text_mask = torch.cat([uncond_text_mask, text_mask])\n\n return prompt_embeds, text_encoder_hidden_states, text_mask\n\n def enable_model_cpu_offload(self, gpu_id=0):\n 
r\"\"\"\n Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared\n to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`\n method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with\n `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.\n \"\"\"\n if is_accelerate_available() and is_accelerate_version(\">=\", \"0.17.0.dev0\"):\n from accelerate import cpu_offload_with_hook\n else:\n raise ImportError(\n \"`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.\"\n )\n\n device = torch.device(f\"cuda:{gpu_id}\")\n\n if self.device.type != \"cpu\":\n self.to(\"cpu\", silence_dtype_warnings=True)\n torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)\n\n hook = None\n for cpu_offloaded_model in [self.text_encoder, self.prior]:\n _, hook = cpu_offload_with_hook(\n cpu_offloaded_model, device, prev_module_hook=hook\n )\n\n # We'll offload the last model manually.\n self.prior_hook = hook\n\n _, hook = cpu_offload_with_hook(\n self.image_encoder, device, prev_module_hook=self.prior_hook\n )\n\n self.final_offload_hook = hook\n\n @torch.no_grad()\n @replace_example_docstring(EXAMPLE_DOC_STRING)\n def __call__(\n self,\n prompt: Union[str, List[str]],\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: int = 1,\n num_inference_steps: int = 25,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n guidance_scale: float = 4.0,\n output_type: Optional[str] = \"pt\",\n return_dict: bool = True,\n ):\n \"\"\"\n Function invoked when calling the pipeline for generation.\n\n Args:\n prompt (`str` or `List[str]`):\n The prompt or prompts to guide the image generation.\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored\n if `guidance_scale` is less than `1`).\n num_images_per_prompt (`int`, *optional*, defaults to 1):\n The number of images to generate per prompt.\n num_inference_steps (`int`, *optional*, defaults to 25):\n The number of denoising steps. More denoising steps usually lead to a higher quality image at the\n expense of slower inference.\n generator (`torch.Generator` or `List[torch.Generator]`, *optional*):\n One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)\n to make generation deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor will ge generated by sampling using the supplied random `generator`.\n guidance_scale (`float`, *optional*, defaults to 4.0):\n Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).\n `guidance_scale` is defined as `w` of equation 2. of [Imagen\n Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >\n 1`. 
Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,\n usually at the expense of lower image quality.\n output_type (`str`, *optional*, defaults to `\"pt\"`):\n The output format of the generate image. Choose between: `\"np\"` (`np.array`) or `\"pt\"`\n (`torch.Tensor`).\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.\n\n Examples:\n\n Returns:\n [`KandinskyPriorPipelineOutput`] or `tuple`\n \"\"\"\n\n if isinstance(prompt, str):\n prompt = [prompt]\n elif not isinstance(prompt, list):\n raise ValueError(\n f\"`prompt` has to be of type `str` or `list` but is {type(prompt)}\"\n )\n\n if isinstance(negative_prompt, str):\n negative_prompt = [negative_prompt]\n elif not isinstance(negative_prompt, list) and negative_prompt is not None:\n raise ValueError(\n f\"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}\"\n )\n\n # if the negative prompt is defined we double the batch size to\n # directly retrieve the negative prompt embedding\n if negative_prompt is not None:\n prompt = prompt + negative_prompt\n negative_prompt = 2 * negative_prompt\n\n device = self._execution_device\n\n batch_size = len(prompt)\n batch_size = batch_size * num_images_per_prompt\n\n prompt_embeds, text_encoder_hidden_states, text_mask = self._encode_prompt(\n prompt, device, num_images_per_prompt, False, negative_prompt\n )\n\n hidden_states = randn_tensor(\n (batch_size, prompt_embeds.shape[-1]),\n device=prompt_embeds.device,\n dtype=prompt_embeds.dtype,\n generator=generator,\n )\n\n latents = self.prior(\n hidden_states,\n proj_embedding=prompt_embeds,\n encoder_hidden_states=text_encoder_hidden_states,\n attention_mask=text_mask,\n ).predicted_image_embedding\n\n image_embeddings = latents\n\n # if negative prompt has been defined, we retrieve split the image embedding into two\n if negative_prompt is None:\n zero_embeds = self.get_zero_embed(latents.shape[0], device=latents.device)\n\n if (\n hasattr(self, \"final_offload_hook\")\n and self.final_offload_hook is not None\n ):\n self.final_offload_hook.offload()\n else:\n image_embeddings, zero_embeds = image_embeddings.chunk(2)\n\n if (\n hasattr(self, \"final_offload_hook\")\n and self.final_offload_hook is not None\n ):\n self.prior_hook.offload()\n\n if output_type not in [\"pt\", \"np\"]:\n raise ValueError(\n f\"Only the output types `pt` and `np` are supported not output_type={output_type}\"\n )\n\n if output_type == \"np\":\n image_embeddings = image_embeddings.cpu().numpy()\n zero_embeds = zero_embeds.cpu().numpy()\n\n if not return_dict:\n return (image_embeddings, zero_embeds)\n\n return KandinskyPriorPipelineOutput(\n image_embeds=image_embeddings, negative_image_embeds=zero_embeds\n )" } ]
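Read together, the context snippets above describe a time-free `PriorTransformer` (the timestep projection and embedding are commented out) and a `KandinskyPriorPipeline` that wraps it with CLIP text/image encoders. The sketch below is one plausible way to assemble and call these pieces to turn a prompt into a CLIP image embedding; the checkpoint identifiers are placeholders and the scheduler is an assumption (this pipeline's `__call__` runs the prior in a single forward pass and never steps the scheduler), so treat it as an illustration rather than the repository's actual setup.

```python
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionModelWithProjection,
)
from diffusers import UnCLIPScheduler

from src.priors.prior_transformer import PriorTransformer
from src.pipelines.pipeline_kandinsky_prior import KandinskyPriorPipeline

CLIP_NAME = "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k"  # placeholder CLIP checkpoint
PRIOR_PATH = "path/to/a/trained/prior"                  # placeholder, not from the source

pipe = KandinskyPriorPipeline(
    prior=PriorTransformer.from_pretrained(PRIOR_PATH),
    image_encoder=CLIPVisionModelWithProjection.from_pretrained(CLIP_NAME),
    text_encoder=CLIPTextModelWithProjection.from_pretrained(CLIP_NAME),
    tokenizer=CLIPTokenizer.from_pretrained(CLIP_NAME),
    scheduler=UnCLIPScheduler(),  # required by the constructor, unused by this time-free __call__
    image_processor=CLIPImageProcessor.from_pretrained(CLIP_NAME),
)

out = pipe("a photo of a corgi wearing sunglasses", guidance_scale=4.0)
image_embeds = out.image_embeds                    # predicted CLIP image embedding(s)
negative_image_embeds = out.negative_image_embeds  # embedding of the empty/zero condition
```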
import gradio as gr
import torch
import math
import numpy as np
from PIL import Image, ImageChops
from torchvision import transforms
from transformers import (
    CLIPProcessor,
    CLIPModel,
    CLIPTokenizer,
    CLIPTextModelWithProjection,
    CLIPVisionModelWithProjection,
    CLIPFeatureExtractor,
)
from typing import List
from diffusers import UnCLIPPipeline
from src.priors.prior_transformer import (
    PriorTransformer,
) # original huggingface prior transformer without time conditioning
from src.pipelines.pipeline_kandinsky_prior import KandinskyPriorPipeline
from diffusers import DiffusionPipeline
8651
# from diffusers.utils.torch_utils import randn_tensor __DEVICE__ = "cpu" if torch.cuda.is_available(): __DEVICE__ = "cuda" class Ours: def __init__(self, device): text_encoder = ( CLIPTextModelWithProjection.from_pretrained( "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", projection_dim=1280, torch_dtype=torch.float16, ) .eval() .requires_grad_(False) ) tokenizer = CLIPTokenizer.from_pretrained( "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k" )
# from diffusers.utils.torch_utils import randn_tensor __DEVICE__ = "cpu" if torch.cuda.is_available(): __DEVICE__ = "cuda" class Ours: def __init__(self, device): text_encoder = ( CLIPTextModelWithProjection.from_pretrained( "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", projection_dim=1280, torch_dtype=torch.float16, ) .eval() .requires_grad_(False) ) tokenizer = CLIPTokenizer.from_pretrained( "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k" )
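The `Ours.__init__` shown above only builds a frozen CLIP-bigG text encoder and tokenizer. As a hedged illustration (the prompt, dtype, and variable names are assumptions, not taken from the source), the sketch below shows how such a pair yields the pooled `proj_embedding` and per-token `encoder_hidden_states` that the time-free prior's `forward` consumes.

```python
import torch
from transformers import CLIPTextModelWithProjection, CLIPTokenizer

name = "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k"
tokenizer = CLIPTokenizer.from_pretrained(name)
text_encoder = (
    CLIPTextModelWithProjection.from_pretrained(name, projection_dim=1280)
    .eval()
    .requires_grad_(False)
)

tokens = tokenizer(
    ["a watercolor painting of a lighthouse"],
    padding="max_length",
    max_length=tokenizer.model_max_length,
    truncation=True,
    return_tensors="pt",
)
with torch.no_grad():
    enc = text_encoder(tokens.input_ids)

proj_embedding = enc.text_embeds               # [batch, 1280] pooled, projected text embedding
encoder_hidden_states = enc.last_hidden_state  # [batch, 77, hidden] per-token hidden states
```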
prior = PriorTransformer.from_pretrained(
0
2023-12-07 05:17:08+00:00
12k
AIFSH/NativeDancer
nativedancer/third_part/densepose/modeling/losses/cse.py
[ { "identifier": "CfgNode", "path": "nativedancer/third_part/detectron2/config/config.py", "snippet": "class CfgNode(_CfgNode):\n \"\"\"\n The same as `fvcore.common.config.CfgNode`, but different in:\n\n 1. Use unsafe yaml loading by default.\n Note that this may lead to arbitrary code execution: you must not\n load a config file from untrusted sources before manually inspecting\n the content of the file.\n 2. Support config versioning.\n When attempting to merge an old config, it will convert the old config automatically.\n\n .. automethod:: clone\n .. automethod:: freeze\n .. automethod:: defrost\n .. automethod:: is_frozen\n .. automethod:: load_yaml_with_base\n .. automethod:: merge_from_list\n .. automethod:: merge_from_other_cfg\n \"\"\"\n\n @classmethod\n def _open_cfg(cls, filename):\n return PathManager.open(filename, \"r\")\n\n # Note that the default value of allow_unsafe is changed to True\n def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:\n \"\"\"\n Load content from the given config file and merge it into self.\n\n Args:\n cfg_filename: config filename\n allow_unsafe: allow unsafe yaml syntax\n \"\"\"\n assert PathManager.isfile(cfg_filename), f\"Config file '{cfg_filename}' does not exist!\"\n loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)\n loaded_cfg = type(self)(loaded_cfg)\n\n # defaults.py needs to import CfgNode\n from .defaults import _C\n\n latest_ver = _C.VERSION\n assert (\n latest_ver == self.VERSION\n ), \"CfgNode.merge_from_file is only allowed on a config object of latest version!\"\n\n logger = logging.getLogger(__name__)\n\n loaded_ver = loaded_cfg.get(\"VERSION\", None)\n if loaded_ver is None:\n from .compat import guess_version\n\n loaded_ver = guess_version(loaded_cfg, cfg_filename)\n assert loaded_ver <= self.VERSION, \"Cannot merge a v{} config into a v{} config.\".format(\n loaded_ver, self.VERSION\n )\n\n if loaded_ver == self.VERSION:\n self.merge_from_other_cfg(loaded_cfg)\n else:\n # compat.py needs to import CfgNode\n from .compat import upgrade_config, downgrade_config\n\n logger.warning(\n \"Loading an old v{} config file '{}' by automatically upgrading to v{}. \"\n \"See docs/CHANGELOG.md for instructions to update your files.\".format(\n loaded_ver, cfg_filename, self.VERSION\n )\n )\n # To convert, first obtain a full config at an old version\n old_self = downgrade_config(self, to_version=loaded_ver)\n old_self.merge_from_other_cfg(loaded_cfg)\n new_config = upgrade_config(old_self)\n self.clear()\n self.update(new_config)\n\n def dump(self, *args, **kwargs):\n \"\"\"\n Returns:\n str: a yaml string representation of the config\n \"\"\"\n # to make it show up in docs\n return super().dump(*args, **kwargs)" }, { "identifier": "Instances", "path": "nativedancer/third_part/detectron2/structures/instances.py", "snippet": "class Instances:\n \"\"\"\n This class represents a list of instances in an image.\n It stores the attributes of instances (e.g., boxes, masks, labels, scores) as \"fields\".\n All fields must have the same ``__len__`` which is the number of instances.\n\n All other (non-field) attributes of this class are considered private:\n they must start with '_' and are not modifiable by a user.\n\n Some basic usage:\n\n 1. Set/get/check a field:\n\n .. code-block:: python\n\n instances.gt_boxes = Boxes(...)\n print(instances.pred_masks) # a tensor of shape (N, H, W)\n print('gt_masks' in instances)\n\n 2. ``len(instances)`` returns the number of instances\n 3. 
Indexing: ``instances[indices]`` will apply the indexing on all the fields\n and returns a new :class:`Instances`.\n Typically, ``indices`` is a integer vector of indices,\n or a binary mask of length ``num_instances``\n\n .. code-block:: python\n\n category_3_detections = instances[instances.pred_classes == 3]\n confident_detections = instances[instances.scores > 0.9]\n \"\"\"\n\n def __init__(self, image_size: Tuple[int, int], **kwargs: Any):\n \"\"\"\n Args:\n image_size (height, width): the spatial size of the image.\n kwargs: fields to add to this `Instances`.\n \"\"\"\n self._image_size = image_size\n self._fields: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.set(k, v)\n\n @property\n def image_size(self) -> Tuple[int, int]:\n \"\"\"\n Returns:\n tuple: height, width\n \"\"\"\n return self._image_size\n\n def __setattr__(self, name: str, val: Any) -> None:\n if name.startswith(\"_\"):\n super().__setattr__(name, val)\n else:\n self.set(name, val)\n\n def __getattr__(self, name: str) -> Any:\n if name == \"_fields\" or name not in self._fields:\n raise AttributeError(\"Cannot find field '{}' in the given Instances!\".format(name))\n return self._fields[name]\n\n def set(self, name: str, value: Any) -> None:\n \"\"\"\n Set the field named `name` to `value`.\n The length of `value` must be the number of instances,\n and must agree with other existing fields in this object.\n \"\"\"\n with warnings.catch_warnings(record=True):\n data_len = len(value)\n if len(self._fields):\n assert (\n len(self) == data_len\n ), \"Adding a field of length {} to a Instances of length {}\".format(data_len, len(self))\n self._fields[name] = value\n\n def has(self, name: str) -> bool:\n \"\"\"\n Returns:\n bool: whether the field called `name` exists.\n \"\"\"\n return name in self._fields\n\n def remove(self, name: str) -> None:\n \"\"\"\n Remove the field called `name`.\n \"\"\"\n del self._fields[name]\n\n def get(self, name: str) -> Any:\n \"\"\"\n Returns the field called `name`.\n \"\"\"\n return self._fields[name]\n\n def get_fields(self) -> Dict[str, Any]:\n \"\"\"\n Returns:\n dict: a dict which maps names (str) to data of the fields\n\n Modifying the returned dict will modify this instance.\n \"\"\"\n return self._fields\n\n # Tensor-like methods\n def to(self, *args: Any, **kwargs: Any) -> \"Instances\":\n \"\"\"\n Returns:\n Instances: all fields are called with a `to(device)`, if the field has this method.\n \"\"\"\n ret = Instances(self._image_size)\n for k, v in self._fields.items():\n if hasattr(v, \"to\"):\n v = v.to(*args, **kwargs)\n ret.set(k, v)\n return ret\n\n def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> \"Instances\":\n \"\"\"\n Args:\n item: an index-like object and will be used to index all the fields.\n\n Returns:\n If `item` is a string, return the data in the corresponding field.\n Otherwise, returns an `Instances` where all fields are indexed by `item`.\n \"\"\"\n if type(item) == int:\n if item >= len(self) or item < -len(self):\n raise IndexError(\"Instances index out of range!\")\n else:\n item = slice(item, None, len(self))\n\n ret = Instances(self._image_size)\n for k, v in self._fields.items():\n ret.set(k, v[item])\n return ret\n\n def __len__(self) -> int:\n for v in self._fields.values():\n # use __len__ because len() has to be int and is not friendly to tracing\n return v.__len__()\n raise NotImplementedError(\"Empty Instances does not support __len__!\")\n\n def __iter__(self):\n raise NotImplementedError(\"`Instances` object is not 
iterable!\")\n\n @staticmethod\n def cat(instance_lists: List[\"Instances\"]) -> \"Instances\":\n \"\"\"\n Args:\n instance_lists (list[Instances])\n\n Returns:\n Instances\n \"\"\"\n assert all(isinstance(i, Instances) for i in instance_lists)\n assert len(instance_lists) > 0\n if len(instance_lists) == 1:\n return instance_lists[0]\n\n image_size = instance_lists[0].image_size\n if not isinstance(image_size, torch.Tensor): # could be a tensor in tracing\n for i in instance_lists[1:]:\n assert i.image_size == image_size\n ret = Instances(image_size)\n for k in instance_lists[0]._fields.keys():\n values = [i.get(k) for i in instance_lists]\n v0 = values[0]\n if isinstance(v0, torch.Tensor):\n values = torch.cat(values, dim=0)\n elif isinstance(v0, list):\n values = list(itertools.chain(*values))\n elif hasattr(type(v0), \"cat\"):\n values = type(v0).cat(values)\n else:\n raise ValueError(\"Unsupported type {} for concatenation\".format(type(v0)))\n ret.set(k, values)\n return ret\n\n def __str__(self) -> str:\n s = self.__class__.__name__ + \"(\"\n s += \"num_instances={}, \".format(len(self))\n s += \"image_height={}, \".format(self._image_size[0])\n s += \"image_width={}, \".format(self._image_size[1])\n s += \"fields=[{}])\".format(\", \".join((f\"{k}: {v}\" for k, v in self._fields.items())))\n return s\n\n __repr__ = __str__" }, { "identifier": "PixToShapeCycleLoss", "path": "nativedancer/third_part/densepose/modeling/losses/cycle_pix2shape.py", "snippet": "class PixToShapeCycleLoss(nn.Module):\n \"\"\"\n Cycle loss for pixel-vertex correspondence\n \"\"\"\n\n def __init__(self, cfg: CfgNode):\n super().__init__()\n self.shape_names = list(cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDERS.keys())\n self.embed_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE\n self.norm_p = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.NORM_P\n self.use_all_meshes_not_gt_only = (\n cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.USE_ALL_MESHES_NOT_GT_ONLY\n )\n self.num_pixels_to_sample = (\n cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.NUM_PIXELS_TO_SAMPLE\n )\n self.pix_sigma = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.PIXEL_SIGMA\n self.temperature_pix_to_vertex = (\n cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.TEMPERATURE_PIXEL_TO_VERTEX\n )\n self.temperature_vertex_to_pix = (\n cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.TEMPERATURE_VERTEX_TO_PIXEL\n )\n self.pixel_dists = _create_pixel_dist_matrix(cfg.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE)\n\n def forward(\n self,\n proposals_with_gt: List[Instances],\n densepose_predictor_outputs: Any,\n packed_annotations: PackedCseAnnotations,\n embedder: nn.Module,\n ):\n \"\"\"\n Args:\n proposals_with_gt (list of Instances): detections with associated\n ground truth data; each item corresponds to instances detected\n on 1 image; the number of items corresponds to the number of\n images in a batch\n densepose_predictor_outputs: an object of a dataclass that contains predictor\n outputs with estimated values; assumed to have the following attributes:\n * embedding - embedding estimates, tensor of shape [N, D, S, S], where\n N = number of instances (= sum N_i, where N_i is the number of\n instances on image i)\n D = embedding space dimensionality (MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE)\n S = output size (width and height)\n packed_annotations (PackedCseAnnotations): contains various data useful\n for loss computation, each data is packed into a single tensor\n embedder (nn.Module): module 
that computes vertex embeddings for different meshes\n \"\"\"\n pix_embeds = densepose_predictor_outputs.embedding\n if self.pixel_dists.device != pix_embeds.device:\n # should normally be done only once\n self.pixel_dists = self.pixel_dists.to(device=pix_embeds.device)\n with torch.no_grad():\n mask_loss_data = extract_data_for_mask_loss_from_matches(\n proposals_with_gt, densepose_predictor_outputs.coarse_segm\n )\n # GT masks - tensor of shape [N, S, S] of int64\n masks_gt = mask_loss_data.masks_gt.long() # pyre-ignore[16]\n assert len(pix_embeds) == len(masks_gt), (\n f\"Number of instances with embeddings {len(pix_embeds)} != \"\n f\"number of instances with GT masks {len(masks_gt)}\"\n )\n losses = []\n mesh_names = (\n self.shape_names\n if self.use_all_meshes_not_gt_only\n else [\n MeshCatalog.get_mesh_name(mesh_id.item())\n for mesh_id in packed_annotations.vertex_mesh_ids_gt.unique()\n ]\n )\n for pixel_embeddings, mask_gt in zip(pix_embeds, masks_gt):\n # pixel_embeddings [D, S, S]\n # mask_gt [S, S]\n for mesh_name in mesh_names:\n mesh_vertex_embeddings = embedder(mesh_name)\n # pixel indices [M]\n pixel_indices_flattened = _sample_fg_pixels_randperm(\n mask_gt, self.num_pixels_to_sample\n )\n # pixel distances [M, M]\n pixel_dists = self.pixel_dists.to(pixel_embeddings.device)[\n torch.meshgrid(pixel_indices_flattened, pixel_indices_flattened)\n ]\n # pixel embeddings [M, D]\n pixel_embeddings_sampled = normalize_embeddings(\n pixel_embeddings.reshape((self.embed_size, -1))[:, pixel_indices_flattened].T\n )\n # pixel-vertex similarity [M, K]\n sim_matrix = pixel_embeddings_sampled.mm(mesh_vertex_embeddings.T)\n c_pix_vertex = F.softmax(sim_matrix / self.temperature_pix_to_vertex, dim=1)\n c_vertex_pix = F.softmax(sim_matrix.T / self.temperature_vertex_to_pix, dim=1)\n c_cycle = c_pix_vertex.mm(c_vertex_pix)\n loss_cycle = torch.norm(pixel_dists * c_cycle, p=self.norm_p)\n losses.append(loss_cycle)\n\n if len(losses) == 0:\n return pix_embeds.sum() * 0\n return torch.stack(losses, dim=0).mean()\n\n def fake_value(self, densepose_predictor_outputs: Any, embedder: nn.Module):\n losses = [embedder(mesh_name).sum() * 0 for mesh_name in embedder.mesh_names]\n losses.append(densepose_predictor_outputs.embedding.sum() * 0)\n return torch.mean(torch.stack(losses))" }, { "identifier": "ShapeToShapeCycleLoss", "path": "nativedancer/third_part/densepose/modeling/losses/cycle_shape2shape.py", "snippet": "class ShapeToShapeCycleLoss(nn.Module):\n \"\"\"\n Cycle Loss for Shapes.\n Inspired by:\n \"Mapping in a Cycle: Sinkhorn Regularized Unsupervised Learning for Point Cloud Shapes\".\n \"\"\"\n\n def __init__(self, cfg: CfgNode):\n super().__init__()\n self.shape_names = list(cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDERS.keys())\n self.all_shape_pairs = [\n (x, y) for i, x in enumerate(self.shape_names) for y in self.shape_names[i + 1 :]\n ]\n random.shuffle(self.all_shape_pairs)\n self.cur_pos = 0\n self.norm_p = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.NORM_P\n self.temperature = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.TEMPERATURE\n self.max_num_vertices = (\n cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.MAX_NUM_VERTICES\n )\n\n def _sample_random_pair(self) -> Tuple[str, str]:\n \"\"\"\n Produce a random pair of different mesh names\n\n Return:\n tuple(str, str): a pair of different mesh names\n \"\"\"\n if self.cur_pos >= len(self.all_shape_pairs):\n random.shuffle(self.all_shape_pairs)\n self.cur_pos = 0\n shape_pair = 
self.all_shape_pairs[self.cur_pos]\n self.cur_pos += 1\n return shape_pair\n\n def forward(self, embedder: nn.Module):\n \"\"\"\n Do a forward pass with a random pair (src, dst) pair of shapes\n Args:\n embedder (nn.Module): module that computes vertex embeddings for different meshes\n \"\"\"\n src_mesh_name, dst_mesh_name = self._sample_random_pair()\n return self._forward_one_pair(embedder, src_mesh_name, dst_mesh_name)\n\n def fake_value(self, embedder: nn.Module):\n losses = []\n for mesh_name in embedder.mesh_names:\n losses.append(embedder(mesh_name).sum() * 0)\n return torch.mean(torch.stack(losses))\n\n def _get_embeddings_and_geodists_for_mesh(\n self, embedder: nn.Module, mesh_name: str\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Produces embeddings and geodesic distance tensors for a given mesh. May subsample\n the mesh, if it contains too many vertices (controlled by\n SHAPE_CYCLE_LOSS_MAX_NUM_VERTICES parameter).\n Args:\n embedder (nn.Module): module that computes embeddings for mesh vertices\n mesh_name (str): mesh name\n Return:\n embeddings (torch.Tensor of size [N, D]): embeddings for selected mesh\n vertices (N = number of selected vertices, D = embedding space dim)\n geodists (torch.Tensor of size [N, N]): geodesic distances for the selected\n mesh vertices (N = number of selected vertices)\n \"\"\"\n embeddings = embedder(mesh_name)\n indices = sample_random_indices(\n embeddings.shape[0], self.max_num_vertices, embeddings.device\n )\n mesh = create_mesh(mesh_name, embeddings.device)\n geodists = mesh.geodists\n if indices is not None:\n embeddings = embeddings[indices]\n geodists = geodists[torch.meshgrid(indices, indices)]\n return embeddings, geodists\n\n def _forward_one_pair(\n self, embedder: nn.Module, mesh_name_1: str, mesh_name_2: str\n ) -> torch.Tensor:\n \"\"\"\n Do a forward pass with a selected pair of meshes\n Args:\n embedder (nn.Module): module that computes vertex embeddings for different meshes\n mesh_name_1 (str): first mesh name\n mesh_name_2 (str): second mesh name\n Return:\n Tensor containing the loss value\n \"\"\"\n embeddings_1, geodists_1 = self._get_embeddings_and_geodists_for_mesh(embedder, mesh_name_1)\n embeddings_2, geodists_2 = self._get_embeddings_and_geodists_for_mesh(embedder, mesh_name_2)\n sim_matrix_12 = embeddings_1.mm(embeddings_2.T)\n\n c_12 = F.softmax(sim_matrix_12 / self.temperature, dim=1)\n c_21 = F.softmax(sim_matrix_12.T / self.temperature, dim=1)\n c_11 = c_12.mm(c_21)\n c_22 = c_21.mm(c_12)\n\n loss_cycle_11 = torch.norm(geodists_1 * c_11, p=self.norm_p)\n loss_cycle_22 = torch.norm(geodists_2 * c_22, p=self.norm_p)\n\n return loss_cycle_11 + loss_cycle_22" }, { "identifier": "EmbeddingLoss", "path": "nativedancer/third_part/densepose/modeling/losses/embed.py", "snippet": "class EmbeddingLoss:\n \"\"\"\n Computes losses for estimated embeddings given annotated vertices.\n Instances in a minibatch that correspond to the same mesh are grouped\n together. 
For each group, loss is computed as cross-entropy for\n unnormalized scores given ground truth mesh vertex ids.\n Scores are based on squared distances between estimated vertex embeddings\n and mesh vertex embeddings.\n \"\"\"\n\n def __init__(self, cfg: CfgNode):\n \"\"\"\n Initialize embedding loss from config\n \"\"\"\n self.embdist_gauss_sigma = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDING_DIST_GAUSS_SIGMA\n\n def __call__(\n self,\n proposals_with_gt: List[Instances],\n densepose_predictor_outputs: Any,\n packed_annotations: PackedCseAnnotations,\n interpolator: BilinearInterpolationHelper,\n embedder: nn.Module,\n ) -> Dict[int, torch.Tensor]:\n \"\"\"\n Produces losses for estimated embeddings given annotated vertices.\n Embeddings for all the vertices of a mesh are computed by the embedder.\n Embeddings for observed pixels are estimated by a predictor.\n Losses are computed as cross-entropy for squared distances between\n observed vertex embeddings and all mesh vertex embeddings given\n ground truth vertex IDs.\n\n Args:\n proposals_with_gt (list of Instances): detections with associated\n ground truth data; each item corresponds to instances detected\n on 1 image; the number of items corresponds to the number of\n images in a batch\n densepose_predictor_outputs: an object of a dataclass that contains predictor\n outputs with estimated values; assumed to have the following attributes:\n * embedding - embedding estimates, tensor of shape [N, D, S, S], where\n N = number of instances (= sum N_i, where N_i is the number of\n instances on image i)\n D = embedding space dimensionality (MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE)\n S = output size (width and height)\n packed_annotations (PackedCseAnnotations): contains various data useful\n for loss computation, each data is packed into a single tensor\n interpolator (BilinearInterpolationHelper): bilinear interpolation helper\n embedder (nn.Module): module that computes vertex embeddings for different meshes\n Return:\n dict(int -> tensor): losses for different mesh IDs\n \"\"\"\n losses = {}\n for mesh_id_tensor in packed_annotations.vertex_mesh_ids_gt.unique():\n mesh_id = mesh_id_tensor.item()\n mesh_name = MeshCatalog.get_mesh_name(mesh_id)\n # valid points are those that fall into estimated bbox\n # and correspond to the current mesh\n j_valid = interpolator.j_valid * ( # pyre-ignore[16]\n packed_annotations.vertex_mesh_ids_gt == mesh_id\n )\n if not torch.any(j_valid):\n continue\n # extract estimated embeddings for valid points\n # -> tensor [J, D]\n vertex_embeddings_i = normalize_embeddings(\n interpolator.extract_at_points(\n densepose_predictor_outputs.embedding,\n slice_fine_segm=slice(None),\n w_ylo_xlo=interpolator.w_ylo_xlo[:, None], # pyre-ignore[16]\n w_ylo_xhi=interpolator.w_ylo_xhi[:, None], # pyre-ignore[16]\n w_yhi_xlo=interpolator.w_yhi_xlo[:, None], # pyre-ignore[16]\n w_yhi_xhi=interpolator.w_yhi_xhi[:, None], # pyre-ignore[16]\n )[j_valid, :]\n )\n # extract vertex ids for valid points\n # -> tensor [J]\n vertex_indices_i = packed_annotations.vertex_ids_gt[j_valid]\n # embeddings for all mesh vertices\n # -> tensor [K, D]\n mesh_vertex_embeddings = embedder(mesh_name)\n # unnormalized scores for valid points\n # -> tensor [J, K]\n scores = squared_euclidean_distance_matrix(\n vertex_embeddings_i, mesh_vertex_embeddings\n ) / (-self.embdist_gauss_sigma)\n losses[mesh_name] = F.cross_entropy(scores, vertex_indices_i, ignore_index=-1)\n\n for mesh_name in embedder.mesh_names:\n if mesh_name not in losses:\n 
losses[mesh_name] = self.fake_value(\n densepose_predictor_outputs, embedder, mesh_name\n )\n return losses\n\n def fake_values(self, densepose_predictor_outputs: Any, embedder: nn.Module):\n losses = {}\n for mesh_name in embedder.mesh_names:\n losses[mesh_name] = self.fake_value(densepose_predictor_outputs, embedder, mesh_name)\n return losses\n\n def fake_value(self, densepose_predictor_outputs: Any, embedder: nn.Module, mesh_name: str):\n return densepose_predictor_outputs.embedding.sum() * 0 + embedder(mesh_name).sum() * 0" }, { "identifier": "CseAnnotationsAccumulator", "path": "nativedancer/third_part/densepose/modeling/losses/embed_utils.py", "snippet": "class CseAnnotationsAccumulator(AnnotationsAccumulator):\n \"\"\"\n Accumulates annotations by batches that correspond to objects detected on\n individual images. Can pack them together into single tensors.\n \"\"\"\n\n def __init__(self):\n self.x_gt = []\n self.y_gt = []\n self.s_gt = []\n self.vertex_mesh_ids_gt = []\n self.vertex_ids_gt = []\n self.bbox_xywh_gt = []\n self.bbox_xywh_est = []\n self.point_bbox_with_dp_indices = []\n self.point_bbox_indices = []\n self.bbox_indices = []\n self.nxt_bbox_with_dp_index = 0\n self.nxt_bbox_index = 0\n\n def accumulate(self, instances_one_image: Instances):\n \"\"\"\n Accumulate instances data for one image\n\n Args:\n instances_one_image (Instances): instances data to accumulate\n \"\"\"\n boxes_xywh_est = BoxMode.convert(\n instances_one_image.proposal_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS\n )\n boxes_xywh_gt = BoxMode.convert(\n instances_one_image.gt_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS\n )\n n_matches = len(boxes_xywh_gt)\n assert n_matches == len(\n boxes_xywh_est\n ), f\"Got {len(boxes_xywh_est)} proposal boxes and {len(boxes_xywh_gt)} GT boxes\"\n if not n_matches:\n # no detection - GT matches\n return\n if (\n not hasattr(instances_one_image, \"gt_densepose\")\n or instances_one_image.gt_densepose is None\n ):\n # no densepose GT for the detections, just increase the bbox index\n self.nxt_bbox_index += n_matches\n return\n for box_xywh_est, box_xywh_gt, dp_gt in zip(\n boxes_xywh_est, boxes_xywh_gt, instances_one_image.gt_densepose\n ):\n if (dp_gt is not None) and (len(dp_gt.x) > 0):\n # pyre-fixme[6]: For 1st argument expected `Tensor` but got `float`.\n # pyre-fixme[6]: For 2nd argument expected `Tensor` but got `float`.\n self._do_accumulate(box_xywh_gt, box_xywh_est, dp_gt)\n self.nxt_bbox_index += 1\n\n def _do_accumulate(self, box_xywh_gt: torch.Tensor, box_xywh_est: torch.Tensor, dp_gt: Any):\n \"\"\"\n Accumulate instances data for one image, given that the data is not empty\n\n Args:\n box_xywh_gt (tensor): GT bounding box\n box_xywh_est (tensor): estimated bounding box\n dp_gt: GT densepose data with the following attributes:\n - x: normalized X coordinates\n - y: normalized Y coordinates\n - segm: tensor of size [S, S] with coarse segmentation\n -\n \"\"\"\n self.x_gt.append(dp_gt.x)\n self.y_gt.append(dp_gt.y)\n if hasattr(dp_gt, \"segm\"):\n self.s_gt.append(dp_gt.segm.unsqueeze(0))\n self.vertex_ids_gt.append(dp_gt.vertex_ids)\n self.vertex_mesh_ids_gt.append(torch.full_like(dp_gt.vertex_ids, dp_gt.mesh_id))\n self.bbox_xywh_gt.append(box_xywh_gt.view(-1, 4))\n self.bbox_xywh_est.append(box_xywh_est.view(-1, 4))\n self.point_bbox_with_dp_indices.append(\n torch.full_like(dp_gt.vertex_ids, self.nxt_bbox_with_dp_index)\n )\n self.point_bbox_indices.append(torch.full_like(dp_gt.vertex_ids, self.nxt_bbox_index))\n 
self.bbox_indices.append(self.nxt_bbox_index)\n self.nxt_bbox_with_dp_index += 1\n\n def pack(self) -> Optional[PackedCseAnnotations]:\n \"\"\"\n Pack data into tensors\n \"\"\"\n if not len(self.x_gt):\n # TODO:\n # returning proper empty annotations would require\n # creating empty tensors of appropriate shape and\n # type on an appropriate device;\n # we return None so far to indicate empty annotations\n return None\n return PackedCseAnnotations(\n x_gt=torch.cat(self.x_gt, 0),\n y_gt=torch.cat(self.y_gt, 0),\n vertex_mesh_ids_gt=torch.cat(self.vertex_mesh_ids_gt, 0),\n vertex_ids_gt=torch.cat(self.vertex_ids_gt, 0),\n # ignore segmentation annotations, if not all the instances contain those\n coarse_segm_gt=torch.cat(self.s_gt, 0)\n if len(self.s_gt) == len(self.bbox_xywh_gt)\n else None,\n bbox_xywh_gt=torch.cat(self.bbox_xywh_gt, 0),\n bbox_xywh_est=torch.cat(self.bbox_xywh_est, 0),\n point_bbox_with_dp_indices=torch.cat(self.point_bbox_with_dp_indices, 0),\n point_bbox_indices=torch.cat(self.point_bbox_indices, 0),\n bbox_indices=torch.as_tensor(\n self.bbox_indices, dtype=torch.long, device=self.x_gt[0].device\n ),\n )" }, { "identifier": "MaskOrSegmentationLoss", "path": "nativedancer/third_part/densepose/modeling/losses/mask_or_segm.py", "snippet": "class MaskOrSegmentationLoss:\n \"\"\"\n Mask or segmentation loss as cross-entropy for raw unnormalized scores\n given ground truth labels. Ground truth labels are either defined by coarse\n segmentation annotation, or by mask annotation, depending on the config\n value MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS\n \"\"\"\n\n def __init__(self, cfg: CfgNode):\n \"\"\"\n Initialize segmentation loss from configuration options\n\n Args:\n cfg (CfgNode): configuration options\n \"\"\"\n self.segm_trained_by_masks = cfg.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS\n if self.segm_trained_by_masks:\n self.mask_loss = MaskLoss()\n self.segm_loss = SegmentationLoss(cfg)\n\n def __call__(\n self,\n proposals_with_gt: List[Instances],\n densepose_predictor_outputs: Any,\n packed_annotations: Any,\n ) -> torch.Tensor:\n \"\"\"\n Compute segmentation loss as cross-entropy between aligned unnormalized\n score estimates and ground truth; with ground truth given\n either by masks, or by coarse segmentation annotations.\n\n Args:\n proposals_with_gt (list of Instances): detections with associated ground truth data\n densepose_predictor_outputs: an object of a dataclass that contains predictor outputs\n with estimated values; assumed to have the following attributes:\n * coarse_segm - coarse segmentation estimates, tensor of shape [N, D, S, S]\n packed_annotations: packed annotations for efficient loss computation\n Return:\n tensor: loss value as cross-entropy for raw unnormalized scores\n given ground truth labels\n \"\"\"\n if self.segm_trained_by_masks:\n return self.mask_loss(proposals_with_gt, densepose_predictor_outputs)\n return self.segm_loss(proposals_with_gt, densepose_predictor_outputs, packed_annotations)\n\n def fake_value(self, densepose_predictor_outputs: Any) -> torch.Tensor:\n \"\"\"\n Fake segmentation loss used when no suitable ground truth data\n was found in a batch. 
The loss has a value 0 and is primarily used to\n construct the computation graph, so that `DistributedDataParallel`\n has similar graphs on all GPUs and can perform reduction properly.\n\n Args:\n densepose_predictor_outputs: DensePose predictor outputs, an object\n of a dataclass that is assumed to have `coarse_segm`\n attribute\n Return:\n Zero value loss with proper computation graph\n \"\"\"\n return densepose_predictor_outputs.coarse_segm.sum() * 0" }, { "identifier": "DENSEPOSE_LOSS_REGISTRY", "path": "nativedancer/third_part/densepose/modeling/losses/registry.py", "snippet": "DENSEPOSE_LOSS_REGISTRY = Registry(\"DENSEPOSE_LOSS\")" }, { "identifier": "SoftEmbeddingLoss", "path": "nativedancer/third_part/densepose/modeling/losses/soft_embed.py", "snippet": "class SoftEmbeddingLoss:\n \"\"\"\n Computes losses for estimated embeddings given annotated vertices.\n Instances in a minibatch that correspond to the same mesh are grouped\n together. For each group, loss is computed as cross-entropy for\n unnormalized scores given ground truth mesh vertex ids.\n Scores are based on:\n 1) squared distances between estimated vertex embeddings\n and mesh vertex embeddings;\n 2) geodesic distances between vertices of a mesh\n \"\"\"\n\n def __init__(self, cfg: CfgNode):\n \"\"\"\n Initialize embedding loss from config\n \"\"\"\n self.embdist_gauss_sigma = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDING_DIST_GAUSS_SIGMA\n self.geodist_gauss_sigma = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.GEODESIC_DIST_GAUSS_SIGMA\n\n def __call__(\n self,\n proposals_with_gt: List[Instances],\n densepose_predictor_outputs: Any,\n packed_annotations: PackedCseAnnotations,\n interpolator: BilinearInterpolationHelper,\n embedder: nn.Module,\n ) -> Dict[int, torch.Tensor]:\n \"\"\"\n Produces losses for estimated embeddings given annotated vertices.\n Embeddings for all the vertices of a mesh are computed by the embedder.\n Embeddings for observed pixels are estimated by a predictor.\n Losses are computed as cross-entropy for unnormalized scores given\n ground truth vertex IDs.\n 1) squared distances between estimated vertex embeddings\n and mesh vertex embeddings;\n 2) geodesic distances between vertices of a mesh\n\n Args:\n proposals_with_gt (list of Instances): detections with associated\n ground truth data; each item corresponds to instances detected\n on 1 image; the number of items corresponds to the number of\n images in a batch\n densepose_predictor_outputs: an object of a dataclass that contains predictor\n outputs with estimated values; assumed to have the following attributes:\n * embedding - embedding estimates, tensor of shape [N, D, S, S], where\n N = number of instances (= sum N_i, where N_i is the number of\n instances on image i)\n D = embedding space dimensionality (MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE)\n S = output size (width and height)\n packed_annotations (PackedCseAnnotations): contains various data useful\n for loss computation, each data is packed into a single tensor\n interpolator (BilinearInterpolationHelper): bilinear interpolation helper\n embedder (nn.Module): module that computes vertex embeddings for different meshes\n Return:\n dict(int -> tensor): losses for different mesh IDs\n \"\"\"\n losses = {}\n for mesh_id_tensor in packed_annotations.vertex_mesh_ids_gt.unique():\n mesh_id = mesh_id_tensor.item()\n mesh_name = MeshCatalog.get_mesh_name(mesh_id)\n # valid points are those that fall into estimated bbox\n # and correspond to the current mesh\n j_valid = interpolator.j_valid * ( # 
pyre-ignore[16]\n packed_annotations.vertex_mesh_ids_gt == mesh_id\n )\n if not torch.any(j_valid):\n continue\n # extract estimated embeddings for valid points\n # -> tensor [J, D]\n vertex_embeddings_i = normalize_embeddings(\n interpolator.extract_at_points(\n densepose_predictor_outputs.embedding,\n slice_fine_segm=slice(None),\n w_ylo_xlo=interpolator.w_ylo_xlo[:, None], # pyre-ignore[16]\n w_ylo_xhi=interpolator.w_ylo_xhi[:, None], # pyre-ignore[16]\n w_yhi_xlo=interpolator.w_yhi_xlo[:, None], # pyre-ignore[16]\n w_yhi_xhi=interpolator.w_yhi_xhi[:, None], # pyre-ignore[16]\n )[j_valid, :]\n )\n # extract vertex ids for valid points\n # -> tensor [J]\n vertex_indices_i = packed_annotations.vertex_ids_gt[j_valid]\n # embeddings for all mesh vertices\n # -> tensor [K, D]\n mesh_vertex_embeddings = embedder(mesh_name)\n # softmax values of geodesic distances for GT mesh vertices\n # -> tensor [J, K]\n mesh = create_mesh(mesh_name, mesh_vertex_embeddings.device)\n geodist_softmax_values = F.softmax(\n mesh.geodists[vertex_indices_i] / (-self.geodist_gauss_sigma), dim=1\n )\n # logsoftmax values for valid points\n # -> tensor [J, K]\n embdist_logsoftmax_values = F.log_softmax(\n squared_euclidean_distance_matrix(vertex_embeddings_i, mesh_vertex_embeddings)\n / (-self.embdist_gauss_sigma),\n dim=1,\n )\n losses[mesh_name] = (-geodist_softmax_values * embdist_logsoftmax_values).sum(1).mean()\n\n for mesh_name in embedder.mesh_names:\n if mesh_name not in losses:\n losses[mesh_name] = self.fake_value(\n densepose_predictor_outputs, embedder, mesh_name\n )\n return losses\n\n def fake_values(self, densepose_predictor_outputs: Any, embedder: nn.Module):\n losses = {}\n for mesh_name in embedder.mesh_names:\n losses[mesh_name] = self.fake_value(densepose_predictor_outputs, embedder, mesh_name)\n return losses\n\n def fake_value(self, densepose_predictor_outputs: Any, embedder: nn.Module, mesh_name: str):\n return densepose_predictor_outputs.embedding.sum() * 0 + embedder(mesh_name).sum() * 0" }, { "identifier": "BilinearInterpolationHelper", "path": "nativedancer/third_part/densepose/modeling/losses/utils.py", "snippet": "def _linear_interpolation_utilities(v_norm, v0_src, size_src, v0_dst, size_dst, size_z):\n def __init__(\n self,\n packed_annotations: Any,\n j_valid: torch.Tensor,\n y_lo: torch.Tensor,\n y_hi: torch.Tensor,\n x_lo: torch.Tensor,\n x_hi: torch.Tensor,\n w_ylo_xlo: torch.Tensor,\n w_ylo_xhi: torch.Tensor,\n w_yhi_xlo: torch.Tensor,\n w_yhi_xhi: torch.Tensor,\n ):\n def from_matches(\n packed_annotations: Any, densepose_outputs_size_hw: Tuple[int, int]\n ) -> \"BilinearInterpolationHelper\":\n def extract_at_points(\n self,\n z_est,\n slice_fine_segm=None,\n w_ylo_xlo=None,\n w_ylo_xhi=None,\n w_yhi_xlo=None,\n w_yhi_xhi=None,\n ):\ndef resample_data(\n z, bbox_xywh_src, bbox_xywh_dst, wout, hout, mode: str = \"nearest\", padding_mode: str = \"zeros\"\n):\n def accumulate(self, instances_one_image: Instances):\n def pack(self) -> Any:\n def __init__(self):\n def accumulate(self, instances_one_image: Instances):\n def _do_accumulate(\n self, box_xywh_gt: torch.Tensor, box_xywh_est: torch.Tensor, dp_gt: DensePoseDataRelative\n ):\n def pack(self) -> Optional[PackedChartBasedAnnotations]:\ndef extract_packed_annotations_from_matches(\n proposals_with_targets: List[Instances], accumulator: AnnotationsAccumulator\n) -> Any:\ndef sample_random_indices(\n n_indices: int, n_samples: int, device: Optional[torch.device] = None\n) -> Optional[torch.Tensor]:\nclass 
BilinearInterpolationHelper:\nclass AnnotationsAccumulator(ABC):\nclass PackedChartBasedAnnotations:\nclass ChartBasedAnnotationsAccumulator(AnnotationsAccumulator):" } ]
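The SoftEmbeddingLoss snippet above reduces, per mesh, to a soft cross-entropy: geodesic-distance softmax targets over all mesh vertices against the log-softmax of negative squared embedding distances. A minimal self-contained sketch of that computation, with toy tensor sizes and placeholder sigma values rather than the configured ones:

import torch
import torch.nn.functional as F

J, K, D = 8, 100, 16                       # toy sizes: J annotated points, K mesh vertices, D embedding dims
geodist_sigma, embdist_sigma = 1.0, 1.0    # placeholders for the *_DIST_GAUSS_SIGMA config values

point_emb = F.normalize(torch.randn(J, D), dim=1)    # estimated embeddings at annotated pixels
vertex_emb = F.normalize(torch.randn(K, D), dim=1)   # embeddings of all mesh vertices
geodists = torch.rand(J, K)                          # geodesic distances from each GT vertex to every vertex

# soft targets: geodesically closer vertices receive more probability mass
target = F.softmax(geodists / (-geodist_sigma), dim=1)
# scores: negative squared Euclidean distance in embedding space
logits = torch.cdist(point_emb, vertex_emb).pow(2) / (-embdist_sigma)
loss = (-target * F.log_softmax(logits, dim=1)).sum(dim=1).mean()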
from typing import Any, List
from torch import nn
from nativedancer.third_part.detectron2.config import CfgNode
from nativedancer.third_part.detectron2.structures import Instances
from .cycle_pix2shape import PixToShapeCycleLoss
from .cycle_shape2shape import ShapeToShapeCycleLoss
from .embed import EmbeddingLoss
from .embed_utils import CseAnnotationsAccumulator
from .mask_or_segm import MaskOrSegmentationLoss
from .registry import DENSEPOSE_LOSS_REGISTRY
from .soft_embed import SoftEmbeddingLoss
from .utils import BilinearInterpolationHelper, LossDict, extract_packed_annotations_from_matches
10,100
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

@DENSEPOSE_LOSS_REGISTRY.register()
class DensePoseCseLoss:
    """ """

    _EMBED_LOSS_REGISTRY = {
        EmbeddingLoss.__name__: EmbeddingLoss,
        SoftEmbeddingLoss.__name__: SoftEmbeddingLoss,
    }
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

@DENSEPOSE_LOSS_REGISTRY.register()
class DensePoseCseLoss:
    """ """

    _EMBED_LOSS_REGISTRY = {
        EmbeddingLoss.__name__: EmbeddingLoss,
        SoftEmbeddingLoss.__name__: SoftEmbeddingLoss,
    }
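The class body above just declares a small name-to-class registry, so the concrete embedding loss can be picked from a config string inside __init__. A self-contained sketch of that dispatch pattern (the dummy classes and the lookup key below are illustrative, not the real config plumbing):

class EmbeddingLoss:
    def __init__(self, cfg):
        self.cfg = cfg

class SoftEmbeddingLoss:
    def __init__(self, cfg):
        self.cfg = cfg

_EMBED_LOSS_REGISTRY = {
    EmbeddingLoss.__name__: EmbeddingLoss,
    SoftEmbeddingLoss.__name__: SoftEmbeddingLoss,
}

embed_loss_name = "SoftEmbeddingLoss"                       # would come from the model config in practice
embed_loss = _EMBED_LOSS_REGISTRY[embed_loss_name](cfg={})  # instantiate the selected loss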
def __init__(self, cfg: CfgNode):
0
2023-12-10 20:14:00+00:00
12k
ethanweber/nerfiller
nerfiller/scripts/inpaint_nerfstudio_dataset.py
[ { "identifier": "RGBInpainter", "path": "nerfiller/inpaint/rgb_inpainter.py", "snippet": "class RGBInpainter:\n \"\"\"\n Module for inpainting with the stable diffusion inpainting pipeline.\n \"\"\"\n\n def __init__(\n self,\n half_precision_weights: bool = True,\n lora_model_path: Optional[str] = None,\n device: str = \"cuda:0\",\n vae_device: str = \"cuda:0\",\n pipeline_name: str = \"stabilityai/stable-diffusion-2-inpainting\",\n ):\n print(f\"Loading RGB Inpainter ...\")\n\n self.half_precision_weights = half_precision_weights\n self.lora_model_path = lora_model_path\n self.device = device\n self.vae_device = vae_device\n self.dtype = torch.float16 if self.half_precision_weights else torch.float32\n self.pipeline_name = pipeline_name\n self.set_pipe()\n self.setup()\n\n def set_pipe(self):\n pipe_kwargs = {\n \"safety_checker\": None,\n \"feature_extractor\": None,\n \"requires_safety_checker\": False,\n \"torch_dtype\": self.dtype,\n }\n self.pipe = StableDiffusionInpaintPipeline.from_pretrained(\n self.pipeline_name,\n **pipe_kwargs,\n )\n\n def setup(self):\n # Load LoRA\n if self.lora_model_path:\n self.pipe.load_lora_weights(self.lora_model_path)\n print(f\"Loaded LoRA model from {self.lora_model_path}\")\n\n self.tokenizer = self.pipe.tokenizer\n self.text_encoder = self.pipe.text_encoder.to(self.device).eval()\n\n self.unet = self.pipe.unet.to(self.device).eval()\n self.vae = self.pipe.vae.to(self.vae_device).eval()\n\n self.vae_scale_factor = 2 ** (len(self.pipe.vae.config.block_out_channels) - 1)\n self.vae_latent_channels = self.pipe.vae.config.latent_channels\n\n # self.scheduler = DDPMScheduler.from_config(self.pipe.scheduler.config)\n self.scheduler = DDIMScheduler.from_config(self.pipe.scheduler.config)\n self.num_train_timesteps = self.scheduler.num_train_timesteps\n self.alphas = self.scheduler.alphas_cumprod.to(self.device)\n\n del self.pipe\n cleanup()\n\n print(f\"Loaded RGB inpainter!\")\n\n def compute_text_embeddings(self, prompt: str, negative_prompt: str):\n \"\"\"Get the text embeddings for a string.\"\"\"\n assert self.tokenizer is not None\n assert self.text_encoder is not None\n with torch.no_grad():\n text_inputs = tokenize_prompt(self.tokenizer, prompt, tokenizer_max_length=None)\n prompt_embeds = encode_prompt(\n self.text_encoder,\n text_inputs.input_ids,\n text_inputs.attention_mask,\n text_encoder_use_attention_mask=False,\n )\n negative_text_inputs = tokenize_prompt(self.tokenizer, negative_prompt, tokenizer_max_length=None)\n negative_prompt_embeds = encode_prompt(\n self.text_encoder,\n negative_text_inputs.input_ids,\n negative_text_inputs.attention_mask,\n text_encoder_use_attention_mask=False,\n )\n\n return [prompt_embeds, negative_prompt_embeds]\n\n def destroy_text_encoder(self) -> None:\n \"\"\"Delete the text modules to save on memory.\"\"\"\n del self.tokenizer\n del self.text_encoder\n cleanup()\n\n def forward_unet(\n self,\n sample,\n t,\n text_embeddings,\n denoise_in_grid: bool = False,\n ):\n # process embeddings\n prompt_embeds, negative_prompt_embeds = text_embeddings\n\n batch_size = sample.shape[0] // 3\n\n prompt_embeds = torch.cat(\n [\n prompt_embeds.repeat(batch_size, 1, 1),\n negative_prompt_embeds.repeat(batch_size, 1, 1),\n negative_prompt_embeds.repeat(batch_size, 1, 1),\n ]\n )\n\n if denoise_in_grid:\n grid_sample = make_grid(sample)\n grid_prompt_embeds = prompt_embeds[:3].repeat(grid_sample.shape[0] // 3, 1, 1)\n noise_pred = self.unet(\n sample=grid_sample,\n timestep=t,\n encoder_hidden_states=grid_prompt_embeds,\n 
return_dict=False,\n )[0]\n noise_pred = undo_grid(noise_pred)\n else:\n noise_pred = self.unet(\n sample=sample,\n timestep=t,\n encoder_hidden_states=prompt_embeds,\n return_dict=False,\n )[0]\n return noise_pred\n\n def get_noise_pred(\n self,\n t,\n model_input: ModelInput,\n text_embeddings,\n text_guidance_scale: float = 0.0,\n image_guidance_scale: float = 0.0,\n denoise_in_grid: bool = False,\n multidiffusion_steps: int = 1,\n multidiffusion_type: str = \"epsilon\",\n randomize_latents: bool = False,\n randomize_within_grid: bool = False,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n only_noise_pred: bool = False,\n ):\n assert self.scheduler.config.prediction_type == \"epsilon\", \"We assume the model predicts epsilon.\"\n\n batch_size = model_input.latents.shape[0]\n value = torch.zeros_like(model_input.latents)\n count = torch.zeros_like(model_input.latents)\n\n for i in range(multidiffusion_steps):\n if randomize_latents:\n indices = torch.randperm(batch_size)\n else:\n indices = torch.arange(batch_size)\n\n if denoise_in_grid and randomize_within_grid:\n for j in range(0, len(indices), 4):\n indices[j : j + 4] = indices[j : j + 4][torch.randperm(4)]\n\n latents = model_input.latents[indices]\n latents_mask = model_input.latents_mask[indices]\n latents_mask_uncond = model_input.latents_mask_uncond[indices]\n masked_image_latents = model_input.masked_image_latents[indices]\n masked_image_latents_uncond = model_input.masked_image_latents_uncond[indices]\n\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents, latents, latents])\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n latents_mask_input = torch.cat([latents_mask, latents_mask, latents_mask_uncond])\n masked_image_latents_input = torch.cat(\n [\n masked_image_latents,\n masked_image_latents,\n masked_image_latents_uncond,\n ]\n )\n\n # concat latents, mask, masked_image_latents in the channel dimension\n latent_model_input_cat = torch.cat(\n [latent_model_input, latents_mask_input, masked_image_latents_input],\n dim=1,\n )\n\n # TODO: save compute by skipping some text encodings if not using them in CFG\n\n noise_pred_all = self.forward_unet(\n sample=latent_model_input_cat,\n t=t,\n text_embeddings=text_embeddings,\n denoise_in_grid=denoise_in_grid,\n )\n\n noise_pred_text, noise_pred_image, noise_pred_uncond = noise_pred_all.chunk(3)\n\n noise_pred = (\n noise_pred_image\n + text_guidance_scale * (noise_pred_text - noise_pred_image)\n + image_guidance_scale * (noise_pred_image - noise_pred_uncond)\n )\n\n if multidiffusion_type == \"v_prediction\":\n v_prediction = get_v_prediction_from_epsilon(noise_pred, t, latents, self.scheduler.alphas_cumprod)\n value[indices] += v_prediction\n count[indices] += 1\n elif multidiffusion_type == \"epsilon\":\n value[indices] += noise_pred\n count[indices] += 1\n else:\n raise ValueError(\"Not implemented.\")\n\n # take the MultiDiffusion step\n final_noise_pred = torch.where(count > 0, value / count, value)\n\n if multidiffusion_type == \"v_prediction\":\n final_noise_pred = get_epsilon_from_v_prediction(\n final_noise_pred,\n t.item(),\n model_input.latents,\n self.scheduler.alphas_cumprod,\n )\n elif multidiffusion_type == \"epsilon\":\n pass\n else:\n raise ValueError(\"Not implemented.\")\n\n if only_noise_pred:\n return None, None, final_noise_pred\n\n scheduler_output = self.scheduler.step(final_noise_pred, t, model_input.latents, generator=generator)\n 
pred_prev_sample = scheduler_output.prev_sample\n pred_original_sample = scheduler_output.pred_original_sample\n\n assert not pred_prev_sample.isnan().any()\n assert not pred_original_sample.isnan().any()\n return pred_prev_sample, pred_original_sample, final_noise_pred\n\n def get_model_input(\n self,\n image: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n starting_image: Optional[Float[Tensor, \"B 3 H W\"]] = None,\n starting_timestep: Optional[int] = None,\n keep_grad: bool = False,\n ) -> ModelInput:\n \"\"\"Returns the inputs for the unet.\"\"\"\n\n # TODO: incorporate seeds\n\n batch_size, _, height, width = image.shape\n\n noise = randn_tensor(\n shape=(\n batch_size,\n self.vae_latent_channels,\n height // self.vae_scale_factor,\n width // self.vae_scale_factor,\n ),\n generator=generator,\n device=torch.device(self.device),\n dtype=self.dtype,\n )\n if starting_image is not None:\n assert starting_timestep is not None\n if keep_grad:\n latents = self.encode_images(starting_image)\n else:\n with torch.no_grad():\n latents = self.encode_images(starting_image)\n latents = self.scheduler.add_noise(latents, noise, starting_timestep)\n else:\n latents = noise\n\n latents_mask = torch.nn.functional.interpolate(\n mask,\n size=(height // self.vae_scale_factor, width // self.vae_scale_factor),\n mode=\"nearest\",\n )\n assert len(torch.unique(latents_mask)) <= 2\n latents_mask = latents_mask.to(device=self.device, dtype=self.dtype)\n assert len(torch.unique(mask)) <= 2\n masked_image = torch.where(mask == 0, image, 0.5)\n with torch.no_grad():\n masked_image_latents = self.encode_images(masked_image)\n\n latents_mask_uncond = torch.ones_like(latents_mask)\n masked_image_uncond = torch.ones_like(masked_image) * 0.5\n with torch.no_grad():\n masked_image_latents_uncond = self.encode_images(masked_image_uncond)\n\n model_input = ModelInput(\n latents.to(device=self.device, dtype=self.dtype),\n latents_mask.to(device=self.device, dtype=self.dtype),\n masked_image_latents.to(device=self.device, dtype=self.dtype),\n latents_mask_uncond.to(device=self.device, dtype=self.dtype),\n masked_image_latents_uncond.to(device=self.device, dtype=self.dtype),\n noise.to(device=self.device, dtype=self.dtype),\n )\n\n return model_input\n\n def get_loss(\n self,\n x0: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n depth: Optional[Float[Tensor, \"B 1 H W\"]] = None,\n multiview_guidance_scale: float = 0.0,\n reconstruction_guidance_scale: float = 0.0,\n feature_extractor: Optional[FeatureExtractor] = None,\n multiview_metric: Optional[MultiviewMetric] = None,\n K: Optional[Float[Tensor, \"B 3 3\"]] = None,\n c2w: Optional[Float[Tensor, \"B 3 4\"]] = None,\n output_folder: Optional[Path] = None,\n step: int = 0,\n guidance_step: int = 0,\n starting_image: Optional[Float[Tensor, \"B 3 H W\"]] = None,\n ):\n \"\"\"Losses on the VAE decoded images x0.\n The multi-view loss is applied where mask == 0.0 (regions that have known depth).\n \"\"\"\n\n loss = 0.0\n\n if multiview_guidance_scale != 0.0:\n features = feature_extractor(x0.to(feature_extractor.device)).to(self.device)\n\n # multiview guidance\n scale_factor = features.shape[-1] / x0.shape[-1]\n K_scaled = rescale_intrinsics(K, scale_factor, scale_factor)\n mask_scaled = 1.0 - torch.nn.functional.interpolate(mask, scale_factor=scale_factor, mode=\"nearest\")\n depth_scaled = torch.nn.functional.interpolate(depth, scale_factor=scale_factor, 
mode=\"bilinear\")\n for cam1 in range(len(c2w)):\n for cam2 in range(cam1 + 1, len(c2w)):\n loss_mv, loss_dict = multiview_metric(\n features1=features[cam1 : cam1 + 1],\n features2=features[cam2 : cam2 + 1],\n K1=K_scaled[cam1 : cam1 + 1],\n K2=K_scaled[cam2 : cam2 + 1],\n c2w1=c2w[cam1 : cam1 + 1],\n c2w2=c2w[cam2 : cam2 + 1],\n image1=x0[cam1 : cam1 + 1],\n image2=x0[cam2 : cam2 + 1],\n mask1=mask_scaled[cam1 : cam1 + 1],\n mask2=mask_scaled[cam2 : cam2 + 1],\n depth1=depth_scaled[cam1 : cam1 + 1],\n depth2=depth_scaled[cam2 : cam2 + 1],\n output_folder=output_folder if (cam1 == 0 and guidance_step == 0) else None,\n suffix=f\"-{step:06d}-{cam1:06d}-{cam2:06d}-{guidance_step:06d}\",\n )\n loss += multiview_guidance_scale * loss_mv.sum()\n\n if reconstruction_guidance_scale != 0.0:\n loss += (\n reconstruction_guidance_scale * (((starting_image.to(x0.device) - x0) * mask.to(x0.device)) ** 2).mean()\n )\n\n return loss\n\n @torch.cuda.amp.autocast(enabled=True)\n def get_image(\n self,\n text_embeddings,\n image: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n num_inference_steps: int = 20,\n denoise_in_grid: bool = False,\n depth: Optional[Float[Tensor, \"B 1 H W\"]] = None,\n text_guidance_scale: Optional[float] = None,\n image_guidance_scale: Optional[float] = None,\n multidiffusion_steps: int = 1,\n multidiffusion_type: str = \"epsilon\",\n randomize_latents: bool = False,\n randomize_within_grid: bool = False,\n use_decoder_approximation: bool = False,\n multiview_guidance_scale: float = 0.0,\n reconstruction_guidance_scale: float = 0.0,\n feature_extractor: Optional[FeatureExtractor] = None,\n multiview_metric: Optional[MultiviewMetric] = None,\n K: Optional[Float[Tensor, \"B 3 3\"]] = None,\n c2w: Optional[Float[Tensor, \"B 3 4\"]] = None,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n show_multiview: bool = False,\n guidance_steps: List[int] = [5],\n num_guidance_steps: int = 10,\n classifier_guidance_scale: float = 0.0,\n output_folder: Optional[Path] = None,\n starting_image: Optional[Float[Tensor, \"B 3 H W\"]] = None,\n starting_lower_bound: Optional[float] = None,\n starting_upper_bound: Optional[float] = None,\n classifier_guidance_loss_rescale=1000.0,\n classifier_guidance_start_step: int = 0,\n replace_original_pixels: bool = False,\n ) -> Float[Tensor, \"B 3 H W\"]:\n \"\"\"Run the denoising sampling process, also known as the reverse process.\n Inpaint where mask == 1.\n If output folder is not None, then save images to this folder.\n\n Args:\n text_embeddings: Either 2 per image (BB) or 2 total, which will use the same cond. and uncond. 
prompts for all.\n loss_rescale: To prevent fp16 underflow\n \"\"\"\n\n if output_folder:\n output_folder.mkdir(parents=True, exist_ok=True)\n\n batch_size, _, height, width = image.shape\n\n if starting_lower_bound is not None:\n min_step = int(self.num_train_timesteps * starting_lower_bound)\n max_step = int(self.num_train_timesteps * starting_upper_bound)\n # select t, set multi-step diffusion\n T = torch.randint(min_step, max_step + 1, [1], dtype=torch.long, device=self.device)\n self.scheduler.config.num_train_timesteps = T.item()\n else:\n self.scheduler.config.num_train_timesteps = self.num_train_timesteps\n\n self.scheduler.set_timesteps(num_inference_steps, device=self.device)\n\n model_input = self.get_model_input(\n image=image,\n mask=mask,\n generator=generator,\n # self.scheduler.config.num_train_timesteps == 1000 is equivalent to starting_lower_bound and starting_upper_bound both being 1\n # so start with full noise by setting this to None\n starting_image=starting_image if self.scheduler.config.num_train_timesteps != 1000 else None,\n starting_timestep=self.scheduler.timesteps[0],\n )\n\n if depth is None:\n depth = torch.zeros_like(mask)\n\n progress = Progress(\n TextColumn(\"[progress.description]{task.description}\"),\n BarColumn(),\n TaskProgressColumn(),\n TimeElapsedColumn(),\n )\n task1 = progress.add_task(\n f\"[green]Inpainting batch of images...\",\n total=len(self.scheduler.timesteps),\n )\n\n with progress:\n for i, t in enumerate(self.scheduler.timesteps):\n start_time = time.time()\n\n # DragDiffusion style guidance (\"drag\")\n use_drag_guidance = (\n multiview_guidance_scale != 0.0 or reconstruction_guidance_scale != 0.0\n ) and i in guidance_steps\n if use_drag_guidance:\n model_input.latents = model_input.latents.to(torch.float32).detach().requires_grad_(True)\n scaler = torch.cuda.amp.GradScaler()\n optimizer = torch.optim.Adam([model_input.latents], lr=1e-2)\n for guidance_step in range(num_guidance_steps):\n _, pred_original_sample, _ = self.get_noise_pred(\n t,\n model_input,\n text_embeddings,\n text_guidance_scale=text_guidance_scale,\n image_guidance_scale=image_guidance_scale,\n denoise_in_grid=denoise_in_grid,\n multidiffusion_steps=1,\n multidiffusion_type=multidiffusion_type,\n randomize_latents=randomize_latents,\n randomize_within_grid=randomize_within_grid,\n )\n x0 = self.decode_latents(\n pred_original_sample,\n use_decoder_approximation=use_decoder_approximation,\n ).to(torch.float32)\n x0 = torch.where(mask == 0, image, x0) if replace_original_pixels else x0\n\n if output_folder:\n image_x0 = torch.cat(list(x0.permute(0, 2, 3, 1)), dim=1).detach().cpu()\n mediapy.write_image(\n output_folder / f\"x0-{i:06d}-{guidance_step:06d}.png\",\n image_x0,\n )\n\n loss = self.get_loss(\n x0=x0,\n mask=mask,\n depth=depth,\n multiview_guidance_scale=multiview_guidance_scale,\n reconstruction_guidance_scale=reconstruction_guidance_scale,\n feature_extractor=feature_extractor,\n multiview_metric=multiview_metric,\n K=K,\n c2w=c2w,\n output_folder=output_folder / \"drag_guidance\",\n step=i,\n guidance_step=guidance_step,\n starting_image=starting_image,\n )\n if wandb.run is not None:\n wandb.log({f\"{output_folder.name}/drag_guidance_loss-{i}\": loss})\n\n optimizer.zero_grad()\n assert not loss.isnan().any()\n scaler.scale(loss).backward()\n\n assert not model_input.latents.grad.isnan().any()\n # print(\n # model_input.latents.grad.abs().mean(),\n # (model_input.latents.grad == 0.0).sum() / model_input.latents.grad.numel(),\n # )\n\n 
scaler.step(optimizer)\n assert not model_input.latents.isnan().any()\n assert not depth.isnan().any()\n scaler.update()\n\n # take a step\n use_classifier_guidance = classifier_guidance_scale != 0.0 and i >= classifier_guidance_start_step\n model_input.latents = (\n model_input.latents.to(self.dtype).detach().requires_grad_(use_classifier_guidance)\n )\n with torch.enable_grad() if use_classifier_guidance else torch.no_grad():\n _, pred_original_sample, noise_pred = self.get_noise_pred(\n t,\n model_input,\n text_embeddings,\n text_guidance_scale=text_guidance_scale,\n image_guidance_scale=image_guidance_scale,\n denoise_in_grid=denoise_in_grid,\n multidiffusion_steps=multidiffusion_steps,\n multidiffusion_type=multidiffusion_type,\n randomize_latents=randomize_latents,\n randomize_within_grid=randomize_within_grid,\n )\n\n # classifier guidance (\"classifier\")\n if use_classifier_guidance:\n x0 = self.decode_latents(\n pred_original_sample,\n use_decoder_approximation=use_decoder_approximation,\n ).to(torch.float32)\n x0 = torch.where(mask == 0, image, x0) if replace_original_pixels else x0\n\n loss = self.get_loss(\n x0=x0,\n mask=mask,\n depth=depth,\n multiview_guidance_scale=multiview_guidance_scale,\n reconstruction_guidance_scale=reconstruction_guidance_scale,\n feature_extractor=feature_extractor,\n multiview_metric=multiview_metric,\n K=K,\n c2w=c2w,\n output_folder=output_folder / \"classifier_guidance\",\n step=i,\n guidance_step=0,\n starting_image=starting_image,\n )\n if wandb.run is not None:\n wandb.log({f\"{output_folder.name}/classifier_guidance_loss\": loss})\n\n grad = (\n torch.autograd.grad(\n classifier_guidance_loss_rescale * loss,\n model_input.latents,\n )[0]\n / classifier_guidance_loss_rescale\n )\n # print(\n # grad.abs().mean(),\n # (grad == 0.0).sum() / grad.numel(),\n # )\n noise_pred = noise_pred + classifier_guidance_scale * grad\n\n model_input.latents = model_input.latents.detach().requires_grad_(False)\n scheduler_output = self.scheduler.step(noise_pred, t, model_input.latents, generator=generator)\n model_input.latents = scheduler_output.prev_sample\n\n if output_folder:\n # save the denoised x0\n with torch.no_grad():\n x0 = self.decode_latents(\n pred_original_sample,\n use_decoder_approximation=use_decoder_approximation,\n ).to(torch.float32)\n x0 = torch.where(mask == 0, image, x0) if replace_original_pixels else x0\n\n if use_drag_guidance or use_classifier_guidance:\n loss = self.get_loss(\n x0=x0,\n mask=mask,\n depth=depth,\n multiview_guidance_scale=multiview_guidance_scale,\n reconstruction_guidance_scale=reconstruction_guidance_scale,\n feature_extractor=feature_extractor,\n multiview_metric=multiview_metric,\n K=K,\n c2w=c2w,\n output_folder=None,\n step=i,\n guidance_step=0,\n starting_image=starting_image,\n )\n if wandb.run is not None:\n wandb.log({f\"{output_folder.name}/loss\": loss})\n\n image_x0 = torch.cat(list(x0.permute(0, 2, 3, 1)), dim=1).detach().cpu()\n mediapy.write_image(output_folder / \"x0.png\", image_x0)\n mediapy.write_image(output_folder / f\"x0-{i:06d}.png\", image_x0)\n\n progress.update(task1, advance=1)\n end_time = time.time()\n # print(f\"[green]Time for iter {i}:\", end_time - start_time)\n\n if output_folder:\n output_filename = str(output_folder) + \".mp4\"\n CONSOLE.print(f\"[green]Saving video to {output_filename}\")\n save_video_from_path(\n path=output_folder,\n glob_str=\"x0*png\",\n sec=10,\n output_filename=output_filename,\n )\n\n with torch.no_grad():\n x0 = self.decode_latents(\n 
model_input.latents.detach(),\n use_decoder_approximation=use_decoder_approximation,\n ).to(torch.float32)\n x0 = torch.where(mask == 0, image, x0) if replace_original_pixels else x0\n return x0\n\n def encode_images(self, imgs: Float[Tensor, \"B 3 512 512\"]) -> Float[Tensor, \"B 4 64 64\"]:\n imgs = imgs * 2.0 - 1.0\n sampled_posterior = self.vae.encode(imgs.to(self.vae_device), return_dict=False)[0].sample().to(self.device)\n latents = sampled_posterior * 0.18215\n return latents\n\n def decode_latents(\n self,\n latents: Float[Tensor, \"B 4 H W\"],\n use_decoder_approximation: bool = False,\n ) -> Float[Tensor, \"B 3 Hout Wout\"]:\n if use_decoder_approximation:\n da = get_decoder_approximation().to(latents)\n x = torch.nn.functional.interpolate(latents, scale_factor=self.vae_scale_factor, mode=\"bilinear\")\n x = torch.matmul(x.permute(0, 2, 3, 1), da).permute(0, 3, 1, 2)\n return x\n else:\n scaled_latents = 1 / 0.18215 * latents\n image = self.vae.decode(scaled_latents.to(self.vae_device), return_dict=False)[0].to(self.device)\n image = (image * 0.5 + 0.5).clamp(0, 1)\n return image\n\n def sds_loss(\n self,\n text_embeddings: Union[Float[Tensor, \"BB 77 768\"], Float[Tensor, \"2 77 768\"]],\n image: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n starting_image: Float[Tensor, \"B 3 H W\"],\n text_guidance_scale: Optional[float] = None,\n image_guidance_scale: Optional[float] = None,\n starting_lower_bound: float = 0.02,\n starting_upper_bound: float = 0.98,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n ) -> torch.Tensor:\n \"\"\"Score Distilation Sampling loss proposed in DreamFusion paper (https://dreamfusion3d.github.io/)\n Args:\n text_embeddings: Text embeddings\n image: Rendered image\n mask: Mask, inpaint where 1\n text_guidance_scale: How much to weigh the guidance\n image_guidance_scale: How much to weigh the guidance\n Returns:\n The loss\n \"\"\"\n\n # NOTE: doesn't work for gridding right now\n\n batch_size, _, height, width = image.shape\n\n min_step = int(self.num_train_timesteps * starting_lower_bound)\n max_step = int(self.num_train_timesteps * starting_upper_bound)\n\n t = torch.randint(min_step, max_step + 1, [1], dtype=torch.long, device=self.device)\n\n model_input = self.get_model_input(\n image=image,\n mask=mask,\n generator=generator,\n starting_image=starting_image,\n starting_timestep=t,\n keep_grad=True,\n )\n\n # predict the noise residual with unet, NO grad!\n with torch.no_grad():\n _, _, noise_pred = self.get_noise_pred(\n t,\n model_input,\n text_embeddings,\n text_guidance_scale=text_guidance_scale,\n image_guidance_scale=image_guidance_scale,\n only_noise_pred=True,\n )\n\n # w(t), sigma_t^2\n w = 1 - self.alphas[t]\n\n grad = w * (noise_pred - model_input.noise)\n grad = torch.nan_to_num(grad)\n\n target = (model_input.latents - grad).detach()\n loss = (\n 0.5\n * torch.nn.functional.mse_loss(model_input.latents, target, reduction=\"sum\")\n / model_input.latents.shape[0]\n )\n\n return loss" }, { "identifier": "LaMaInpainter", "path": "nerfiller/inpaint/lama_inpainter.py", "snippet": "class LaMaInpainter:\n \"\"\"LaMa inpainter model.\"\"\"\n\n def __init__(self, device: str = \"cuda:0\", model_path: Path = Path(\"data/models/big-lama\")):\n print(f\"Loading LaMa inpainter ...\")\n\n self.device = device\n\n train_config_path = os.path.join(model_path, \"config.yaml\")\n with open(train_config_path, \"r\") as f:\n train_config = OmegaConf.create(yaml.safe_load(f))\n\n 
train_config.training_model.predict_only = True\n train_config.visualizer.kind = \"noop\"\n\n checkpoint_path = os.path.join(model_path, \"models\", \"best.ckpt\")\n\n self.model = load_checkpoint(train_config, checkpoint_path, strict=False, map_location=\"cpu\")\n self.model.freeze()\n self.model.to(self.device)\n\n def get_image(self, image: Float[Tensor, \"B 3 H W\"], mask: Float[Tensor, \"B 1 H W\"]):\n with torch.no_grad():\n batch = {}\n batch[\"image\"] = image\n batch[\"mask\"] = mask\n batch = self.model(batch)\n inpainted_image = batch[\"inpainted\"]\n return inpainted_image" }, { "identifier": "parse_nerfstudio_frame", "path": "nerfiller/nerf/dataset_utils.py", "snippet": "def parse_nerfstudio_frame(\n transforms: Dict,\n data_path: Path,\n idx: int,\n depth_max: int = None,\n device: str = \"cpu\",\n size: Optional[Tuple[int, int]] = None,\n dtype=torch.float32,\n):\n \"\"\"Parses a Nerfstudio frame, where idx == 0 is the first image sorted by filename.\n The frames are not normally sorted, but we sort them before doing any operations.\n We return processed information where we load images, depth maps, and masks, useful for inpainting this dataset.\n Size will resize the image to (height, width).\n \"\"\"\n sorted_frames = sorted(transforms[\"frames\"], key=lambda x: x[\"file_path\"])\n imf = data_path / Path(sorted_frames[idx][\"file_path\"])\n image = torch.from_numpy(mediapy.read_image(imf) / 255.0).permute(2, 0, 1)[None].to(dtype).to(device)\n if \"mask_path\" in sorted_frames[idx]:\n maf = data_path / Path(sorted_frames[idx][\"mask_path\"])\n mask = 1 - torch.from_numpy(mediapy.read_image(maf) / 255.0)[None, None].to(dtype).to(device)\n else:\n mask = torch.zeros_like(image[:, :1])\n if \"depth_file_path\" in sorted_frames[idx]:\n daf = data_path / Path(sorted_frames[idx][\"depth_file_path\"])\n depth = torch.from_numpy(np.load(daf))[None, None].to(dtype).to(device)\n else:\n depth = torch.zeros_like(image[:, :1])\n # image *= 1 - mask\n # depth *= 1 - mask\n if depth_max:\n depth[depth > depth_max] = 0.0\n # check if the values are stored per frame\n if \"fl_x\" in sorted_frames[idx]:\n fx = sorted_frames[idx][\"fl_x\"]\n fy = sorted_frames[idx][\"fl_y\"]\n cx = sorted_frames[idx][\"cx\"]\n cy = sorted_frames[idx][\"cy\"]\n else:\n fx = transforms[\"fl_x\"]\n fy = transforms[\"fl_y\"]\n cx = transforms[\"cx\"]\n cy = transforms[\"cy\"]\n K = torch.tensor([[fx, 0, cx], [0, fy, cy], [0, 0, 1]], dtype=torch.float32, device=device)\n c2wh = torch.tensor(sorted_frames[idx][\"transform_matrix\"]).to(torch.float32).to(device)\n c2w = c2wh[:3]\n w2ch = torch.inverse(c2wh)\n w2c = w2ch[:3]\n K = K[None]\n c2w = c2w[None]\n\n if size:\n scale_factor_x = size[1] / image.shape[-1]\n scale_factor_y = size[0] / image.shape[-2]\n image = torch.nn.functional.interpolate(image, size=size, mode=\"bilinear\")\n depth = torch.nn.functional.interpolate(depth, size=size, mode=\"bilinear\")\n mask = torch.nn.functional.interpolate(mask, size=size, mode=\"nearest\")\n K = rescale_intrinsics(K, scale_factor_x, scale_factor_y)\n\n return image, depth, mask, c2w, K" }, { "identifier": "get_inpainted_image_row", "path": "nerfiller/utils/image_utils.py", "snippet": "def get_inpainted_image_row(\n image: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n inpainted_image: Optional[Float[Tensor, \"B 3 H W\"]] = None,\n color: Tuple[float, float, float] = Colors.NEON_PINK.value,\n show_original: bool = False,\n):\n \"\"\"Returns an image concatenated along the x-axis. 
It has the following form:\n image with inpaint regions highlighted | image with inpainted regions\n Inpaint where mask == 1.\n The default color is neon pink.\n If the inpainted image is None, then just show the `image with inpaint regions highlighted`.\n \"\"\"\n device = image.device\n c = torch.tensor(color, device=device).view(1, 3, 1, 1)\n color_image = torch.ones_like(image) * c\n image_with_highlights = torch.where(mask == 1, color_image, image)\n image_list = [image_with_highlights]\n if inpainted_image is not None:\n image_list = image_list + [inpainted_image]\n if show_original:\n image_list = [image] + image_list\n im = torch.cat(image_list, dim=-2)\n return im" }, { "identifier": "rescale_intrinsics", "path": "nerfiller/utils/camera_utils.py", "snippet": "def rescale_intrinsics(Ks: Float[Tensor, \"B 3 3 3\"], scale_factor_x: float, scale_factor_y: float):\n Ks_new = Ks.clone()\n Ks_new[:, 0:1] *= scale_factor_x\n Ks_new[:, 1:2] *= scale_factor_y\n return Ks_new" }, { "identifier": "InpaintConfig", "path": "nerfiller/configs/inpaint.py", "snippet": "class InpaintConfig:" }, { "identifier": "register_extended_attention", "path": "nerfiller/utils/diff_utils.py", "snippet": "def register_extended_attention(unet):\n \"\"\"Method from Tune-A-Video, but code modified from TokenFlow codebase.\"\"\"\n\n def sa_forward(self):\n to_out = self.to_out\n if type(to_out) is torch.nn.modules.container.ModuleList:\n to_out = self.to_out[0]\n else:\n to_out = self.to_out\n\n def forward(x, encoder_hidden_states=None, attention_mask=None):\n batch_size, sequence_length, dim = x.shape\n h = self.heads\n # Here we are making an assumption about passing in 3 varients of conditioning into the model\n n_frames = batch_size // 3\n is_cross = encoder_hidden_states is not None\n encoder_hidden_states = encoder_hidden_states if is_cross else x\n q = self.to_q(x)\n k = self.to_k(encoder_hidden_states)\n v = self.to_v(encoder_hidden_states)\n\n k_0 = k[:n_frames].reshape(1, n_frames * sequence_length, -1).repeat(n_frames, 1, 1)\n k_1 = k[n_frames : 2 * n_frames].reshape(1, n_frames * sequence_length, -1).repeat(n_frames, 1, 1)\n k_2 = k[2 * n_frames :].reshape(1, n_frames * sequence_length, -1).repeat(n_frames, 1, 1)\n v_0 = v[:n_frames].reshape(1, n_frames * sequence_length, -1).repeat(n_frames, 1, 1)\n v_1 = v[n_frames : 2 * n_frames].reshape(1, n_frames * sequence_length, -1).repeat(n_frames, 1, 1)\n v_2 = v[2 * n_frames :].reshape(1, n_frames * sequence_length, -1).repeat(n_frames, 1, 1)\n\n q_0 = self.head_to_batch_dim(q[:n_frames])\n q_1 = self.head_to_batch_dim(q[n_frames : 2 * n_frames])\n q_2 = self.head_to_batch_dim(q[2 * n_frames :])\n k_0 = self.head_to_batch_dim(k_0)\n k_1 = self.head_to_batch_dim(k_1)\n k_2 = self.head_to_batch_dim(k_2)\n v_0 = self.head_to_batch_dim(v_0)\n v_1 = self.head_to_batch_dim(v_1)\n v_2 = self.head_to_batch_dim(v_2)\n\n out_0 = []\n out_1 = []\n out_2 = []\n\n q_0 = q_0.view(n_frames, h, sequence_length, dim // h)\n k_0 = k_0.view(n_frames, h, sequence_length * n_frames, dim // h)\n v_0 = v_0.view(n_frames, h, sequence_length * n_frames, dim // h)\n q_1 = q_1.view(n_frames, h, sequence_length, dim // h)\n k_1 = k_1.view(n_frames, h, sequence_length * n_frames, dim // h)\n v_1 = v_1.view(n_frames, h, sequence_length * n_frames, dim // h)\n q_2 = q_2.view(n_frames, h, sequence_length, dim // h)\n k_2 = k_2.view(n_frames, h, sequence_length * n_frames, dim // h)\n v_2 = v_2.view(n_frames, h, sequence_length * n_frames, dim // h)\n\n for j in range(h):\n sim_0 = 
torch.bmm(q_0[:, j], k_0[:, j].transpose(-1, -2)) * self.scale\n sim_1 = torch.bmm(q_1[:, j], k_1[:, j].transpose(-1, -2)) * self.scale\n sim_2 = torch.bmm(q_2[:, j], k_2[:, j].transpose(-1, -2)) * self.scale\n\n out_0.append(torch.bmm(sim_0.softmax(dim=-1), v_0[:, j]))\n out_1.append(torch.bmm(sim_1.softmax(dim=-1), v_1[:, j]))\n out_2.append(torch.bmm(sim_2.softmax(dim=-1), v_2[:, j]))\n\n out_0 = (\n torch.cat(out_0, dim=0)\n .view(h, n_frames, sequence_length, dim // h)\n .permute(1, 0, 2, 3)\n .reshape(h * n_frames, sequence_length, -1)\n )\n out_1 = (\n torch.cat(out_1, dim=0)\n .view(h, n_frames, sequence_length, dim // h)\n .permute(1, 0, 2, 3)\n .reshape(h * n_frames, sequence_length, -1)\n )\n out_2 = (\n torch.cat(out_2, dim=0)\n .view(h, n_frames, sequence_length, dim // h)\n .permute(1, 0, 2, 3)\n .reshape(h * n_frames, sequence_length, -1)\n )\n\n out = torch.cat([out_0, out_1, out_2], dim=0)\n out = self.batch_to_head_dim(out)\n\n return to_out(out)\n\n return forward\n\n for _, unet_module in unet.named_modules():\n if isinstance_str(unet_module, \"BasicTransformerBlock\"):\n module = unet_module.attn1\n module.forward = sa_forward(module)\n\n res_dict = {1: [1, 2], 2: [0, 1, 2], 3: [0, 1, 2]}\n # we are injecting attention in blocks 4 - 11 of the decoder, so not in the first block of the lowest resolution\n for res in res_dict:\n for block in res_dict[res]:\n module = unet.up_blocks[res].attentions[block].transformer_blocks[0].attn1\n module.forward = sa_forward(module)" }, { "identifier": "downscale_mask", "path": "nerfiller/utils/mask_utils.py", "snippet": "def downscale_mask(mask, size=None, scale_factor=None, dilate_iters=0, dilate_kernel_size=3):\n \"\"\"\n Downscale the mask in a conservative way. 1s are where to inpaint, 0 where to not inpaint.\n Inpaints extra pixels to prevent leakage under the mask.\n \"\"\"\n assert size or scale_factor\n if size:\n assert scale_factor is None\n if scale_factor:\n assert size is None\n for _ in range(dilate_iters):\n mask = dilate(mask, kernel_size=dilate_kernel_size)\n mask = torch.nn.functional.interpolate(mask, size=size, scale_factor=scale_factor, mode=\"bilinear\")\n mask = (mask != 0.0).float() # expands the mask slightly for no leakage of pixels\n return mask" } ]
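The get_noise_pred method in the RGBInpainter snippet above runs the UNet on three stacked variants of every latent (text-conditioned, image-conditioned, unconditional) and blends the three predictions with separate text and image guidance scales. A minimal sketch of just that blending step on dummy tensors (shapes and scale values are illustrative):

import torch

B, C, H, W = 2, 4, 64, 64
text_guidance_scale, image_guidance_scale = 7.5, 1.5     # illustrative guidance weights

noise_pred_all = torch.randn(3 * B, C, H, W)             # stand-in for the UNet output on the 3x batch
noise_pred_text, noise_pred_image, noise_pred_uncond = noise_pred_all.chunk(3)

# start from the image-conditioned prediction, push toward the text prompt
# and away from the fully unconditional branch
noise_pred = (
    noise_pred_image
    + text_guidance_scale * (noise_pred_text - noise_pred_image)
    + image_guidance_scale * (noise_pred_image - noise_pred_uncond)
)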
import json
import shutil
import mediapy
import torch
import tyro
import math
from pathlib import Path
from nerfiller.inpaint.rgb_inpainter import RGBInpainter
from nerfiller.inpaint.lama_inpainter import LaMaInpainter
from nerfiller.nerf.dataset_utils import parse_nerfstudio_frame
from nerfiller.utils.image_utils import get_inpainted_image_row
from nerfiller.utils.camera_utils import rescale_intrinsics
from nerfiller.configs.inpaint import InpaintConfig, AnnotatedBaseConfigUnion
from datetime import datetime
from nerfiller.utils.diff_utils import register_extended_attention
from nerfiller.utils.mask_utils import downscale_mask
10,571
def main(
    config: InpaintConfig,
):
    """
    Inpaint a Nerfstudio dataset where the masks == 0.
    """
    if config.method_name == "individual-lama":
def main(
    config: InpaintConfig,
):
    """
    Inpaint a Nerfstudio dataset where the masks == 0.
    """
    if config.method_name == "individual-lama":
rgb_inpainter = LaMaInpainter(device=config.device, model_path=Path("data/models/big-lama"))
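The completion above builds the LaMaInpainter defined in the context; a hedged usage sketch of the inpainting call that would follow in this kind of script (the random image and mask are placeholders, the 1 = fill mask convention is assumed, and the big-lama checkpoint plus a CUDA device must be available):

from pathlib import Path

import torch

from nerfiller.inpaint.lama_inpainter import LaMaInpainter

rgb_inpainter = LaMaInpainter(device="cuda:0", model_path=Path("data/models/big-lama"))

image = torch.rand(1, 3, 512, 512, device="cuda:0")                  # placeholder frame, B x 3 x H x W
mask = (torch.rand(1, 1, 512, 512, device="cuda:0") > 0.9).float()   # 1 = pixels to inpaint (assumed convention)
inpainted = rgb_inpainter.get_image(image=image, mask=mask)          # -> B x 3 x H x W inpainted result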
1
2023-12-07 19:12:08+00:00
12k
nnanhuang/Customize-it-3D
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key 
in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "VQModelInterface", "path": "ldm/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, 
ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(list(self.encoder.parameters())+\n list(self.decoder.parameters())+\n list(self.quant_conv.parameters())+\n list(self.post_quant_conv.parameters()),\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = 
self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def to(self, device):\n \"\"\"Same as to in torch module\n Don't really underestand why this isn't a module in the first place\"\"\"\n for k, v in self.__dict__.items():\n if isinstance(v, torch.Tensor):\n new_v = getattr(self, k).to(device)\n setattr(self, k, new_v)\n\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n 
self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n t_start=-1):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n timesteps = timesteps[:t_start]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback:\n img = callback(i, img, pred_x0)\n if img_callback: \n img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n\n print(t, sqrt_one_minus_at, a_t)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, 
use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n return x_dec" }, { "identifier": "CrossAttention", "path": "ldm/modules/attention.py", "snippet": "class CrossAttention(nn.Module):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head ** -0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim),\n nn.Dropout(dropout)\n )\n\n def forward(self, x, context=None, mask=None):\n h = self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))\n\n sim = einsum('b i d, b j d -> b i j', q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, 'b ... -> b (...)')\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, 'b j -> (b h) () j', h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum('b i j, b j d -> b i d', attn, v)\n out = rearrange(out, '(b h) n d -> b n (h d)', h=h)\n return self.to_out(out)" } ]
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.rank_zero import rank_zero_only from omegaconf import ListConfig from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler from ldm.modules.attention import CrossAttention
10,071
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
if exists(given_betas):
1
2023-12-14 11:03:35+00:00
12k
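The row above ends at this point (level 12k). Read together, its fields appear to form one next-line prediction example: cropped_code stops at the def register_schedule(...) signature inside class DDPM, and next_line is the statement that follows it in the source file. Below is a minimal sketch of how such a row could be consumed, assuming the row has been parsed into a plain Python dict keyed by the column names listed at the top of this dump; that parsing step and the field semantics are assumptions inferred from the visible rows, not a published schema.

# Minimal sketch, not part of the dataset: `record` is a hypothetical dict
# holding one row of this dump (keys: repo_name, file_path, context,
# import_statement, cropped_code, all_code, next_line, gold_snippet_index,
# created_at, level). Field semantics are inferred from the rows shown here.
def build_example(record):
    # Model input: the file's import block followed by the truncated file body.
    prompt = record["import_statement"].rstrip() + "\n" + record["cropped_code"]
    # Prediction target: next_line is the source line that directly follows
    # cropped_code (here "if exists(given_betas):", the first statement of the
    # register_schedule body at which cropped_code stops).
    target = record["next_line"]
    return prompt, target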
TaoHuang13/diffusion_reward
scripts/train_vqdiffusion.py
[ { "identifier": "build_dataloader", "path": "diffusion_reward/models/video_models/vqdiffusion/data/build.py", "snippet": "def build_dataloader(config, args=None, return_dataset=True):\n dataset_cfg = config['dataloader']\n train_dataset = []\n for ds_cfg in dataset_cfg['train_datasets']:\n ds_cfg['params']['data_root'] = dataset_cfg.get('data_root', '')\n ds = instantiate_from_config(ds_cfg)\n train_dataset.append(ds)\n if len(train_dataset) > 1:\n train_dataset = ConcatDataset(train_dataset)\n else:\n train_dataset = train_dataset[0]\n \n val_dataset = []\n for ds_cfg in dataset_cfg['validation_datasets']:\n ds_cfg['params']['data_root'] = dataset_cfg.get('data_root', '')\n ds = instantiate_from_config(ds_cfg)\n val_dataset.append(ds)\n if len(val_dataset) > 1:\n val_dataset = ConcatDataset(val_dataset)\n else:\n val_dataset = val_dataset[0]\n \n if args is not None and args.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=True)\n val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False)\n train_iters = len(train_sampler) // dataset_cfg['batch_size']\n val_iters = len(val_sampler) // dataset_cfg['batch_size']\n else:\n train_sampler = None\n val_sampler = None\n train_iters = len(train_dataset) // dataset_cfg['batch_size']\n val_iters = len(val_dataset) // dataset_cfg['batch_size']\n\n # if args is not None and not args.debug:\n # num_workers = max(2*dataset_cfg['batch_size'], dataset_cfg['num_workers'])\n # num_workers = min(64, num_workers)\n # else:\n # num_workers = dataset_cfg['num_workers']\n num_workers = dataset_cfg['num_workers']\n train_loader = torch.utils.data.DataLoader(train_dataset, \n batch_size=dataset_cfg['batch_size'], \n shuffle=(train_sampler is None),\n num_workers=num_workers, \n pin_memory=True, \n sampler=train_sampler, \n drop_last=True,\n persistent_workers=True)\n\n val_loader = torch.utils.data.DataLoader(val_dataset, \n batch_size=dataset_cfg['batch_size'], \n shuffle=False, #(val_sampler is None),\n num_workers=num_workers, \n pin_memory=True, \n sampler=val_sampler, \n drop_last=True,\n persistent_workers=True)\n\n dataload_info = {\n 'train_loader': train_loader,\n 'validation_loader': val_loader,\n 'train_iterations': train_iters,\n 'validation_iterations': val_iters\n }\n \n if return_dataset:\n dataload_info['train_dataset'] = train_dataset\n dataload_info['validation_dataset'] = val_dataset\n\n return dataload_info" }, { "identifier": "launch", "path": "diffusion_reward/models/video_models/vqdiffusion/distributed/launch.py", "snippet": "def launch(fn, n_gpu_per_machine, n_machine=1, machine_rank=0, dist_url=None, args=()):\n world_size = n_machine * n_gpu_per_machine\n\n if world_size > 1:\n # if \"OMP_NUM_THREADS\" not in os.environ:\n # os.environ[\"OMP_NUM_THREADS\"] = \"1\"\n\n if dist_url == \"auto\":\n if n_machine != 1:\n raise ValueError('dist_url=\"auto\" not supported in multi-machine jobs')\n\n port = find_free_port()\n dist_url = f\"tcp://127.0.0.1:{port}\"\n\n if n_machine > 1 and dist_url.startswith(\"file://\"):\n raise ValueError(\n \"file:// is not a reliable init method in multi-machine jobs. 
Prefer tcp://\"\n )\n\n mp.spawn(\n distributed_worker,\n nprocs=n_gpu_per_machine,\n args=(fn, world_size, n_gpu_per_machine, machine_rank, dist_url, args),\n daemon=False,\n )\n\n else:\n local_rank = 0\n fn(local_rank, *args)" }, { "identifier": "Logger", "path": "diffusion_reward/models/video_models/vqdiffusion/engine/logger.py", "snippet": "class Logger(object):\n def __init__(self, args):\n self.args = args\n self.save_dir = args.save_dir\n self.is_primary = is_primary()\n \n if self.is_primary:\n # save the args and config\n self.config_dir = 'configs'\n os.makedirs(self.config_dir, exist_ok=True)\n file_name = os.path.join(self.config_dir, 'args.txt')\n write_args(args, file_name)\n\n log_dir = 'logs'\n if not os.path.exists(log_dir):\n os.makedirs(log_dir, exist_ok=True)\n self.text_writer = open(os.path.join(log_dir, 'log.txt'), 'a') # 'w')\n if args.tensorboard:\n self.log_info('using tensorboard')\n self.tb_writer = torch.utils.tensorboard.SummaryWriter(log_dir=log_dir) # tensorboard.SummaryWriter(log_dir=log_dir)\n else:\n self.tb_writer = None\n \n\n def save_config(self, config):\n if self.is_primary:\n save_config_to_yaml(config, os.path.join(self.config_dir, 'config.yaml'))\n\n def log_info(self, info, check_primary=True):\n if self.is_primary or (not check_primary):\n print(info)\n if self.is_primary:\n info = str(info)\n time_str = time.strftime('%Y-%m-%d-%H-%M')\n info = '{}: {}'.format(time_str, info)\n if not info.endswith('\\n'):\n info += '\\n'\n self.text_writer.write(info)\n self.text_writer.flush()\n\n def add_scalar(self, **kargs):\n \"\"\"Log a scalar variable.\"\"\"\n if self.is_primary:\n if self.tb_writer is not None:\n self.tb_writer.add_scalar(**kargs)\n\n def add_scalars(self, **kargs):\n \"\"\"Log a scalar variable.\"\"\"\n if self.is_primary:\n if self.tb_writer is not None:\n self.tb_writer.add_scalars(**kargs)\n\n def add_image(self, **kargs):\n \"\"\"Log a scalar variable.\"\"\"\n if self.is_primary:\n if self.tb_writer is not None:\n self.tb_writer.add_image(**kargs)\n\n def add_images(self, **kargs):\n \"\"\"Log a scalar variable.\"\"\"\n if self.is_primary:\n if self.tb_writer is not None:\n self.tb_writer.add_images(**kargs)\n\n\n def close(self):\n if self.is_primary:\n self.text_writer.close()\n self.tb_writer.close()" }, { "identifier": "Solver", "path": "diffusion_reward/models/video_models/vqdiffusion/engine/solver.py", "snippet": "class Solver(object):\n def __init__(self, config, args, model, dataloader, logger):\n self.config = config\n self.args = args\n self.model = model \n self.dataloader = dataloader\n self.logger = logger\n \n self.max_epochs = config['solver']['max_epochs']\n self.save_epochs = config['solver']['save_epochs']\n self.save_iterations = config['solver'].get('save_iterations', -1)\n self.sample_iterations = config['solver']['sample_iterations']\n if self.sample_iterations == 'epoch':\n self.sample_iterations = self.dataloader['train_iterations']\n self.validation_epochs = config['solver'].get('validation_epochs', 2)\n assert isinstance(self.save_epochs, (int, list))\n assert isinstance(self.validation_epochs, (int, list))\n self.debug = config['solver'].get('debug', False)\n\n self.last_epoch = -1\n self.last_iter = -1\n # self.ckpt_dir = os.path.join(args.save_dir, 'checkpoint')\n # self.image_dir = os.path.join(args.save_dir, 'images')\n self.ckpt_dir = \"checkpoint\"\n self.image_dir = \"images\"\n os.makedirs(self.ckpt_dir, exist_ok=True)\n os.makedirs(self.image_dir, exist_ok=True)\n\n # get grad_clipper\n if 
'clip_grad_norm' in config['solver']:\n self.clip_grad_norm = instantiate_from_config(config['solver']['clip_grad_norm'])\n else:\n self.clip_grad_norm = None\n\n # get lr\n adjust_lr = config['solver'].get('adjust_lr', 'sqrt')\n base_lr = config['solver'].get('base_lr', 1.0e-4)\n if adjust_lr == 'none':\n self.lr = base_lr\n elif adjust_lr == 'sqrt':\n self.lr = base_lr * math.sqrt(args.world_size * config['dataloader']['batch_size'])\n elif adjust_lr == 'linear':\n self.lr = base_lr * args.world_size * config['dataloader']['batch_size']\n else:\n raise NotImplementedError('Unknown type of adjust lr {}!'.format(adjust_lr))\n self.logger.log_info('Get lr {} from base lr {} with {}'.format(self.lr, base_lr, adjust_lr))\n\n if hasattr(model, 'get_optimizer_and_scheduler') and callable(getattr(model, 'get_optimizer_and_scheduler')):\n optimizer_and_scheduler = model.get_optimizer_and_scheduler(config['solver']['optimizers_and_schedulers'])\n else:\n optimizer_and_scheduler = self._get_optimizer_and_scheduler(config['solver']['optimizers_and_schedulers'])\n\n assert type(optimizer_and_scheduler) == type({}), 'optimizer and schduler should be a dict!'\n self.optimizer_and_scheduler = optimizer_and_scheduler\n\n # configre for ema\n if 'ema' in config['solver'] and args.local_rank == 0:\n ema_args = config['solver']['ema']\n ema_args = OmegaConf.to_container(copy.deepcopy(ema_args), resolve=True)\n ema_args['model'] = self.model\n self.ema = EMA(**ema_args)\n else:\n self.ema = None\n\n self.logger.log_info(str(get_model_parameters_info(self.model)))\n self.model.cuda()\n self.device = self.model.device\n if self.args.distributed:\n self.logger.log_info('Distributed, begin DDP the model...')\n self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[self.args.gpu], find_unused_parameters=False)\n self.logger.log_info('Distributed, DDP model done!')\n # prepare for amp\n self.args.amp = self.args.amp and AMP\n if self.args.amp:\n self.scaler = GradScaler()\n self.logger.log_info('Using AMP for training!')\n\n self.logger.log_info(\"{}: global rank {}: prepare solver done!\".format(self.args.exp_name,self.args.global_rank), check_primary=False)\n\n self.best_loss = float('inf')\n\n def _get_optimizer_and_scheduler(self, op_sc_list):\n optimizer_and_scheduler = {}\n for op_sc_cfg in op_sc_list:\n op_sc = {\n 'name': op_sc_cfg.get('name', 'none'),\n 'start_epoch': op_sc_cfg.get('start_epoch', 0),\n 'end_epoch': op_sc_cfg.get('end_epoch', -1),\n 'start_iteration': op_sc_cfg.get('start_iteration', 0),\n 'end_iteration': op_sc_cfg.get('end_iteration', -1),\n }\n\n if op_sc['name'] == 'none':\n # parameters = self.model.parameters()\n parameters = filter(lambda p: p.requires_grad, self.model.parameters())\n else:\n # NOTE: get the parameters with the given name, the parameters() should be overide\n parameters = self.model.parameters(name=op_sc['name'])\n \n # build optimizer\n op_cfg = op_sc_cfg.get('optimizer', {'target': 'torch.optim.SGD', 'params': {}})\n op_cfg = OmegaConf.to_container(copy.deepcopy(op_cfg), resolve=True)\n if 'params' not in op_cfg:\n op_cfg['params'] = {}\n if 'lr' not in op_cfg['params']:\n op_cfg['params']['lr'] = self.lr\n op_cfg['params']['params'] = parameters\n optimizer = instantiate_from_config(op_cfg)\n op_sc['optimizer'] = {\n 'module': optimizer,\n 'step_iteration': op_cfg.get('step_iteration', 1)\n }\n assert isinstance(op_sc['optimizer']['step_iteration'], int), 'optimizer steps should be a integer number of iterations'\n\n # build scheduler\n if 
'scheduler' in op_sc_cfg:\n sc_cfg = OmegaConf.to_container(copy.deepcopy(op_sc_cfg['scheduler']), resolve=True)\n sc_cfg['params']['optimizer'] = optimizer\n # for cosine annealing lr, compute T_max\n if sc_cfg['target'].split('.')[-1] in ['CosineAnnealingLRWithWarmup', 'CosineAnnealingLR']:\n T_max = self.max_epochs * self.dataloader['train_iterations']\n sc_cfg['params']['T_max'] = T_max\n scheduler = instantiate_from_config(sc_cfg)\n op_sc['scheduler'] = {\n 'module': scheduler,\n 'step_iteration': sc_cfg.get('step_iteration', 1)\n }\n if op_sc['scheduler']['step_iteration'] == 'epoch':\n op_sc['scheduler']['step_iteration'] = self.dataloader['train_iterations']\n optimizer_and_scheduler[op_sc['name']] = op_sc\n\n return optimizer_and_scheduler\n\n def _get_lr(self, return_type='str'):\n \n lrs = {}\n for op_sc_n, op_sc in self.optimizer_and_scheduler.items():\n lr = op_sc['optimizer']['module'].state_dict()['param_groups'][0]['lr']\n lrs[op_sc_n+'_lr'] = round(lr, 10)\n if return_type == 'str':\n lrs = str(lrs)\n lrs = lrs.replace('none', 'lr').replace('{', '').replace('}','').replace('\\'', '')\n elif return_type == 'dict':\n pass \n else:\n raise ValueError('Unknow of return type: {}'.format(return_type))\n return lrs\n\n def sample(self, batch, phase='train', step_type='iteration'):\n tic = time.time()\n self.logger.log_info('Begin to sample...')\n if self.ema is not None:\n self.ema.modify_to_inference()\n suffix = '_ema'\n else:\n suffix = ''\n \n if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):\n model = self.model.module\n else: \n model = self.model \n \n with torch.no_grad(): \n if self.debug == False:\n if self.args.amp:\n with autocast():\n samples = model.sample(batch=batch, step=self.last_iter)\n else:\n samples = model.sample(batch=batch, step=self.last_iter)\n else:\n samples = model.sample(batch=batch[0].cuda(), step=self.last_iter)\n\n step = self.last_iter if step_type == 'iteration' else self.last_epoch\n for k, v in samples.items():\n save_dir = os.path.join(self.image_dir, phase, k)\n os.makedirs(save_dir, exist_ok=True)\n save_path = os.path.join(save_dir, 'e{:010d}_itr{:010d}_rank{}{}'.format(self.last_epoch, self.last_iter%self.dataloader['train_iterations'], get_rank(), suffix))\n if torch.is_tensor(v) and v.dim() == 4 and v.shape[1] in [1, 3]: # image\n im = v\n im = im.to(torch.uint8)\n self.logger.add_images(tag='{}/{}e_{}itr/{}'.format(phase, self.last_epoch, self.last_iter%self.dataloader['train_iterations'], k), img_tensor=im, global_step=step, dataformats='NCHW')\n\n # save images\n im_grid = torchvision.utils.make_grid(im)\n im_grid = im_grid.permute(1, 2, 0).to('cpu').numpy()\n im_grid = Image.fromarray(im_grid)\n\n im_grid.save(save_path + '.jpg')\n self.logger.log_info('save {} to {}'.format(k, save_path+'.jpg'))\n else: # may be other values, such as text caption\n with open(save_path+'.txt', 'a') as f:\n f.write(str(v)+'\\n')\n f.close()\n self.logger.log_info('save {} to {}'.format(k, save_path+'txt'))\n \n if self.ema is not None:\n self.ema.modify_to_train()\n \n self.logger.log_info('Sample done, time: {:.2f}'.format(time.time() - tic))\n\n def step(self, batch, phase='train'):\n loss = {}\n if self.debug == False: \n for k, v in batch.items():\n if torch.is_tensor(v):\n batch[k] = v.cuda()\n else:\n batch = batch[0].cuda()\n for op_sc_n, op_sc in self.optimizer_and_scheduler.items():\n if phase == 'train':\n # check if this optimizer and scheduler is valid in this iteration and epoch\n if op_sc['start_iteration'] > 
self.last_iter:\n continue\n if op_sc['end_iteration'] > 0 and op_sc['end_iteration'] <= self.last_iter:\n continue\n if op_sc['start_epoch'] > self.last_epoch:\n continue\n if op_sc['end_epoch'] > 0 and op_sc['end_epoch'] <= self.last_epoch:\n continue\n\n input = {\n 'batch': batch,\n 'return_loss': True,\n 'step': self.last_iter,\n }\n if op_sc_n != 'none':\n input['name'] = op_sc_n\n\n if phase == 'train':\n if self.args.amp:\n with autocast():\n output = self.model(**input)\n else:\n output = self.model(**input)\n else:\n with torch.no_grad():\n if self.args.amp:\n with autocast():\n output = self.model(**input)\n else:\n output = self.model(**input)\n \n if phase == 'train':\n if op_sc['optimizer']['step_iteration'] > 0 and (self.last_iter + 1) % op_sc['optimizer']['step_iteration'] == 0:\n op_sc['optimizer']['module'].zero_grad()\n if self.args.amp:\n self.scaler.scale(output['loss']).backward()\n if self.clip_grad_norm is not None:\n self.clip_grad_norm(self.model.parameters())\n self.scaler.step(op_sc['optimizer']['module'])\n self.scaler.update()\n else:\n output['loss'].backward()\n if self.clip_grad_norm is not None:\n self.clip_grad_norm(self.model.parameters())\n op_sc['optimizer']['module'].step()\n \n if 'scheduler' in op_sc:\n if op_sc['scheduler']['step_iteration'] > 0 and (self.last_iter + 1) % op_sc['scheduler']['step_iteration'] == 0:\n if isinstance(op_sc['scheduler']['module'], STEP_WITH_LOSS_SCHEDULERS):\n op_sc['scheduler']['module'].step(output.get('loss'))\n else:\n op_sc['scheduler']['module'].step()\n # update ema model\n if self.ema is not None:\n self.ema.update(iteration=self.last_iter)\n\n loss[op_sc_n] = {k: v for k, v in output.items() if ('loss' in k or 'acc' in k)}\n return loss\n\n def save(self, force=False, is_best=False):\n if is_primary():\n # save with the epoch specified name\n if self.save_iterations > 0:\n if (self.last_iter + 1) % self.save_iterations == 0:\n save = True \n else:\n save = False\n else:\n if isinstance(self.save_epochs, int):\n save = (self.last_epoch + 1) % self.save_epochs == 0\n else:\n save = (self.last_epoch + 1) in self.save_epochs\n \n if save or force:\n state_dict = {\n 'last_epoch': self.last_epoch,\n 'last_iter': self.last_iter,\n 'model': self.model.module.state_dict() if isinstance(self.model, torch.nn.parallel.DistributedDataParallel) else self.model.state_dict() \n }\n if self.ema is not None:\n state_dict['ema'] = self.ema.state_dict()\n if self.clip_grad_norm is not None:\n state_dict['clip_grad_norm'] = self.clip_grad_norm.state_dict()\n\n # add optimizers and schedulers\n optimizer_and_scheduler = {}\n for op_sc_n, op_sc in self.optimizer_and_scheduler.items():\n state_ = {}\n for k in op_sc:\n if k in ['optimizer', 'scheduler']:\n op_or_sc = {kk: vv for kk, vv in op_sc[k].items() if kk != 'module'}\n op_or_sc['module'] = op_sc[k]['module'].state_dict()\n state_[k] = op_or_sc\n else:\n state_[k] = op_sc[k]\n optimizer_and_scheduler[op_sc_n] = state_\n\n state_dict['optimizer_and_scheduler'] = optimizer_and_scheduler\n \n if save:\n save_path = os.path.join(self.ckpt_dir, '{}e_{}iter.pth'.format(str(self.last_epoch).zfill(6), self.last_iter))\n torch.save(state_dict, save_path)\n self.logger.log_info('saved in {}'.format(save_path)) \n \n # save with the last name\n save_path = os.path.join(self.ckpt_dir, 'last.pth')\n torch.save(state_dict, save_path) \n self.logger.log_info('saved in {}'.format(save_path)) \n\n if is_best:\n save_path = os.path.join(self.ckpt_dir, 'best.pth')\n torch.save(state_dict, 
save_path) \n self.logger.log_info('best checktpoint saved in {} !!!!!!!!!!'.format(save_path)) \n \n def resume(self, \n path=None, # The path of last.pth\n load_optimizer_and_scheduler=True, # whether to load optimizers and scheduler\n load_others=True # load other informations\n ): \n if path is None:\n path = os.path.join(self.ckpt_dir, 'last.pth')\n\n if os.path.exists(path):\n state_dict = torch.load(path, map_location='cuda:{}'.format(self.args.local_rank))\n\n if load_others:\n self.last_epoch = state_dict['last_epoch']\n self.last_iter = state_dict['last_iter']\n \n if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):\n try:\n self.model.module.load_state_dict(state_dict['model'])\n except:\n model_dict = self.model.module.state_dict()\n temp_state_dict = {k:v for k,v in state_dict['model'].items() if k in model_dict.keys()}\n model_dict.update(temp_state_dict)\n self.model.module.load_state_dict(model_dict)\n else:\n self.model.load_state_dict(state_dict['model'])\n\n if 'ema' in state_dict and self.ema is not None:\n try:\n self.ema.load_state_dict(state_dict['ema'])\n except:\n model_dict = self.ema.state_dict()\n temp_state_dict = {k:v for k,v in state_dict['ema'].items() if k in model_dict.keys()}\n model_dict.update(temp_state_dict)\n self.ema.load_state_dict(model_dict)\n\n if 'clip_grad_norm' in state_dict and self.clip_grad_norm is not None:\n self.clip_grad_norm.load_state_dict(state_dict['clip_grad_norm'])\n\n # handle optimizer and scheduler\n for op_sc_n, op_sc in state_dict['optimizer_and_scheduler'].items():\n for k in op_sc:\n if k in ['optimizer', 'scheduler']:\n for kk in op_sc[k]:\n if kk == 'module' and load_optimizer_and_scheduler:\n self.optimizer_and_scheduler[op_sc_n][k][kk].load_state_dict(op_sc[k][kk])\n elif load_others: # such as step_iteration, ...\n self.optimizer_and_scheduler[op_sc_n][k][kk] = op_sc[k][kk]\n elif load_others: # such as start_epoch, end_epoch, ....\n self.optimizer_and_scheduler[op_sc_n][k] = op_sc[k]\n \n self.logger.log_info('Resume from {}'.format(path))\n \n def train_epoch(self):\n self.model.train()\n self.last_epoch += 1\n\n if self.args.distributed:\n self.dataloader['train_loader'].sampler.set_epoch(self.last_epoch)\n\n epoch_start = time.time()\n itr_start = time.time()\n itr = -1\n for itr, batch in enumerate(self.dataloader['train_loader']):\n if itr == 0:\n print(\"time2 is \" + str(time.time()))\n data_time = time.time() - itr_start\n step_start = time.time()\n self.last_iter += 1\n loss = self.step(batch, phase='train')\n # logging info\n if self.logger is not None and self.last_iter % self.args.log_frequency == 0:\n info = '{}: train'.format(self.args.exp_name)\n info = info + ': Epoch {}/{} iter {}/{}'.format(self.last_epoch, self.max_epochs, self.last_iter%self.dataloader['train_iterations'], self.dataloader['train_iterations'])\n for loss_n, loss_dict in loss.items():\n info += ' ||'\n loss_dict = reduce_dict(loss_dict)\n info += '' if loss_n == 'none' else ' {}'.format(loss_n)\n # info = info + ': Epoch {}/{} iter {}/{}'.format(self.last_epoch, self.max_epochs, self.last_iter%self.dataloader['train_iterations'], self.dataloader['train_iterations'])\n for k in loss_dict:\n info += ' | {}: {:.4f}'.format(k, float(loss_dict[k]))\n self.logger.add_scalar(tag='train/{}/{}'.format(loss_n, k), scalar_value=float(loss_dict[k]), global_step=self.last_iter)\n \n # log lr\n lrs = self._get_lr(return_type='dict')\n for k in lrs.keys():\n lr = lrs[k]\n self.logger.add_scalar(tag='train/{}_lr'.format(k), 
scalar_value=lrs[k], global_step=self.last_iter)\n\n # add lr to info\n info += ' || {}'.format(self._get_lr())\n \n # add time consumption to info\n spend_time = time.time() - self.start_train_time\n itr_time_avg = spend_time / (self.last_iter + 1)\n info += ' || data_time: {dt}s | fbward_time: {fbt}s | iter_time: {it}s | iter_avg_time: {ita}s | epoch_time: {et} | spend_time: {st} | left_time: {lt}'.format(\n dt=round(data_time, 1),\n it=round(time.time() - itr_start, 1),\n fbt=round(time.time() - step_start, 1),\n ita=round(itr_time_avg, 1),\n et=format_seconds(time.time() - epoch_start),\n st=format_seconds(spend_time),\n lt=format_seconds(itr_time_avg*self.max_epochs*self.dataloader['train_iterations']-spend_time)\n )\n self.logger.log_info(info)\n \n itr_start = time.time()\n\n # modify here to make sure dataloader['train_iterations'] is correct\n assert itr >= 0, \"The data is too less to form one iteration!\"\n self.dataloader['train_iterations'] = itr + 1\n\n def validate_epoch(self):\n if 'validation_loader' not in self.dataloader:\n val = False\n else:\n if isinstance(self.validation_epochs, int):\n val = (self.last_epoch + 1) % self.validation_epochs == 0\n else:\n val = (self.last_epoch + 1) in self.validation_epochs \n\n is_best = False\n if val:\n if self.args.distributed:\n self.dataloader['validation_loader'].sampler.set_epoch(self.last_epoch)\n\n self.model.eval()\n overall_loss = None\n epoch_start = time.time()\n itr_start = time.time()\n itr = -1\n for itr, batch in enumerate(self.dataloader['validation_loader']):\n data_time = time.time() - itr_start\n step_start = time.time()\n loss = self.step(batch, phase='val')\n \n for loss_n, loss_dict in loss.items():\n loss[loss_n] = reduce_dict(loss_dict)\n if overall_loss is None:\n overall_loss = loss\n else:\n for loss_n, loss_dict in loss.items():\n for k, v in loss_dict.items():\n overall_loss[loss_n][k] = (overall_loss[loss_n][k] * itr + loss[loss_n][k]) / (itr + 1)\n \n if self.logger is not None and (itr+1) % self.args.log_frequency == 0:\n info = '{}: val'.format(self.args.exp_name) \n info = info + ': Epoch {}/{} | iter {}/{}'.format(self.last_epoch, self.max_epochs, itr, self.dataloader['validation_iterations'])\n for loss_n, loss_dict in loss.items():\n info += ' ||'\n info += '' if loss_n == 'none' else ' {}'.format(loss_n)\n # info = info + ': Epoch {}/{} | iter {}/{}'.format(self.last_epoch, self.max_epochs, itr, self.dataloader['validation_iterations'])\n for k in loss_dict:\n info += ' | {}: {:.4f}'.format(k, float(loss_dict[k]))\n \n itr_time_avg = (time.time() - epoch_start) / (itr + 1)\n info += ' || data_time: {dt}s | fbward_time: {fbt}s | iter_time: {it}s | epoch_time: {et} | left_time: {lt}'.format(\n dt=round(data_time, 1),\n fbt=round(time.time() - step_start, 1),\n it=round(time.time() - itr_start, 1),\n et=format_seconds(time.time() - epoch_start),\n lt=format_seconds(itr_time_avg*(self.dataloader['train_iterations']-itr-1))\n )\n \n self.logger.log_info(info)\n itr_start = time.time()\n # modify here to make sure dataloader['validation_iterations'] is correct\n assert itr >= 0, \"The data is too less to form one iteration!\"\n self.dataloader['validation_iterations'] = itr + 1\n\n if self.logger is not None:\n info = '{}: val'.format(self.args.exp_name) \n for loss_n, loss_dict in overall_loss.items():\n info += '' if loss_n == 'none' else ' {}'.format(loss_n)\n info += ': Epoch {}/{}'.format(self.last_epoch, self.max_epochs)\n for k in loss_dict:\n info += ' | {}: {:.4f}'.format(k, 
float(loss_dict[k]))\n self.logger.add_scalar(tag='val/{}/{}'.format(loss_n, k), scalar_value=float(loss_dict[k]), global_step=self.last_epoch)\n self.logger.log_info(info)\n\n print(overall_loss)\n val_loss = sum([loss_dict[k] for k in loss_dict])\n is_best = val_loss < self.best_loss\n self.best_loss = min(self.best_loss, val_loss)\n \n return is_best\n \n def validate(self):\n self.validation_epoch()\n\n def train(self):\n start_epoch = self.last_epoch + 1\n self.start_train_time = time.time()\n self.logger.log_info('{}: global rank {}: start training...'.format(self.args.exp_name, self.args.global_rank), check_primary=False)\n \n for epoch in range(start_epoch, self.max_epochs):\n self.train_epoch()\n is_best = self.validate_epoch()\n self.save(force=True, is_best=is_best)" }, { "identifier": "build_model", "path": "diffusion_reward/models/video_models/vqdiffusion/modeling/build.py", "snippet": "def build_model(config, args=None):\n return instantiate_from_config(config['model'])" }, { "identifier": "load_yaml_config", "path": "diffusion_reward/models/video_models/vqdiffusion/utils/io.py", "snippet": "def load_yaml_config(path):\n with open(path) as f:\n config = yaml.full_load(f)\n return config" }, { "identifier": "merge_opts_to_config", "path": "diffusion_reward/models/video_models/vqdiffusion/utils/misc.py", "snippet": "def merge_opts_to_config(config, opts):\n def modify_dict(c, nl, v):\n if len(nl) == 1:\n c[nl[0]] = type(c[nl[0]])(v)\n else:\n # print(nl)\n c[nl[0]] = modify_dict(c[nl[0]], nl[1:], v)\n return c\n\n if opts is not None and len(opts) > 0:\n assert len(opts) % 2 == 0, \"each opts should be given by the name and values! The length shall be even number!\"\n for i in range(len(opts) // 2):\n name = opts[2*i]\n value = opts[2*i+1]\n config = modify_dict(config, name.split('.'), value)\n return config " }, { "identifier": "modify_config_for_debug", "path": "diffusion_reward/models/video_models/vqdiffusion/utils/misc.py", "snippet": "def modify_config_for_debug(config):\n config['dataloader']['num_workers'] = 0\n config['dataloader']['batch_size'] = 1\n return config" }, { "identifier": "seed_everything", "path": "diffusion_reward/models/video_models/vqdiffusion/utils/misc.py", "snippet": "def seed_everything(seed, cudnn_deterministic=False):\n \"\"\"\n Function that sets seed for pseudo-random number generators in:\n pytorch, numpy, python.random\n \n Args:\n seed: the integer value seed for global random state\n \"\"\"\n if seed is not None:\n print(f\"Global seed set to {seed}\")\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n if cudnn_deterministic:\n torch.backends.cudnn.deterministic = True\n warnings.warn('You have chosen to seed training. '\n 'This will turn on the CUDNN deterministic setting, '\n 'which can slow down your training considerably! '\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')" } ]
import os import warnings import hydra import torch from diffusion_reward.models.video_models.vqdiffusion.data.build import \ build_dataloader from diffusion_reward.models.video_models.vqdiffusion.distributed.launch import launch from diffusion_reward.models.video_models.vqdiffusion.engine.logger import Logger from diffusion_reward.models.video_models.vqdiffusion.engine.solver import Solver from diffusion_reward.models.video_models.vqdiffusion.modeling.build import \ build_model from diffusion_reward.models.video_models.vqdiffusion.utils.io import load_yaml_config from diffusion_reward.models.video_models.vqdiffusion.utils.misc import ( merge_opts_to_config, modify_config_for_debug, seed_everything)
8,364
# environment variables NODE_RANK = os.environ['AZ_BATCHAI_TASK_INDEX'] if 'AZ_BATCHAI_TASK_INDEX' in os.environ else 0 NODE_RANK = int(NODE_RANK) MASTER_ADDR, MASTER_PORT = os.environ['AZ_BATCH_MASTER_NODE'].split(':') if 'AZ_BATCH_MASTER_NODE' in os.environ else ("127.0.0.1", 29500) MASTER_PORT = int(MASTER_PORT) DIST_URL = 'tcp://%s:%s' % (MASTER_ADDR, MASTER_PORT) @hydra.main(config_path='../diffusion_reward/configs/models/video_models/vqdiffusion', config_name='default') def main(args): args.save_dir = os.path.abspath(os.path.dirname(__file__)) args.node_rank = NODE_RANK args.dist_url = DIST_URL if args.seed is not None or args.cudnn_deterministic:
# environment variables NODE_RANK = os.environ['AZ_BATCHAI_TASK_INDEX'] if 'AZ_BATCHAI_TASK_INDEX' in os.environ else 0 NODE_RANK = int(NODE_RANK) MASTER_ADDR, MASTER_PORT = os.environ['AZ_BATCH_MASTER_NODE'].split(':') if 'AZ_BATCH_MASTER_NODE' in os.environ else ("127.0.0.1", 29500) MASTER_PORT = int(MASTER_PORT) DIST_URL = 'tcp://%s:%s' % (MASTER_ADDR, MASTER_PORT) @hydra.main(config_path='../diffusion_reward/configs/models/video_models/vqdiffusion', config_name='default') def main(args): args.save_dir = os.path.abspath(os.path.dirname(__file__)) args.node_rank = NODE_RANK args.dist_url = DIST_URL if args.seed is not None or args.cudnn_deterministic:
seed_everything(args.seed, args.cudnn_deterministic)
8
2023-12-05 02:42:28+00:00
12k
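The scripts/train_vqdiffusion.py row ends here (level 12k). In this row, gold_snippet_index appears to index into the context list: context holds nine snippets (build_dataloader, launch, Logger, Solver, build_model, load_yaml_config, merge_opts_to_config, modify_config_for_debug, seed_everything), gold_snippet_index is 8, and next_line is a call to seed_everything, the entry at index 8. A small sketch under that assumption follows; gold_snippet and row are illustrative names, not part of the dump.

# Illustrative only: `record` is a hypothetical dict built from one row of this
# dump. gold_snippet_index is assumed (based on the row above) to select the
# context entry whose definition the predicted next line relies on.
def gold_snippet(record):
    # Each context entry is a dict with "identifier", "path" and "snippet" keys.
    return record["context"][record["gold_snippet_index"]]

# For the scripts/train_vqdiffusion.py row:
#   gold_snippet(row)["identifier"] -> "seed_everything"
#   row["next_line"]                -> "seed_everything(args.seed, args.cudnn_deterministic)"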
mkang315/ASF-YOLO
models/yolo.py
[ { "identifier": "check_anchor_order", "path": "utils/autoanchor.py", "snippet": "def check_anchor_order(m):\n # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary\n a = m.anchors.prod(-1).mean(-1).view(-1) # mean anchor area per output layer\n da = a[-1] - a[0] # delta a\n ds = m.stride[-1] - m.stride[0] # delta s\n if da and (da.sign() != ds.sign()): # same order\n LOGGER.info(f'{PREFIX}Reversing anchor order')\n m.anchors[:] = m.anchors.flip(0)" }, { "identifier": "LOGGER", "path": "utils/general.py", "snippet": "LOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.)" }, { "identifier": "check_version", "path": "utils/general.py", "snippet": "def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):\n # Check version vs. required version\n current, minimum = (pkg.parse_version(x) for x in (current, minimum))\n result = (current == minimum) if pinned else (current >= minimum) # bool\n s = f'WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed' # string\n if hard:\n assert result, emojis(s) # assert min requirements met\n if verbose and not result:\n LOGGER.warning(s)\n return result" }, { "identifier": "check_yaml", "path": "utils/general.py", "snippet": "def check_yaml(file, suffix=('.yaml', '.yml')):\n # Search/download YAML file (if necessary) and return path, checking suffix\n return check_file(file, suffix)" }, { "identifier": "make_divisible", "path": "utils/general.py", "snippet": "def make_divisible(x, divisor):\n # Returns nearest x divisible by divisor\n if isinstance(divisor, torch.Tensor):\n divisor = int(divisor.max()) # to int\n return math.ceil(x / divisor) * divisor" }, { "identifier": "print_args", "path": "utils/general.py", "snippet": "def print_args(args: Optional[dict] = None, show_file=True, show_func=False):\n # Print function arguments (optional args dict)\n x = inspect.currentframe().f_back # previous frame\n file, _, func, _, _ = inspect.getframeinfo(x)\n if args is None: # get args automatically\n args, _, _, frm = inspect.getargvalues(x)\n args = {k: v for k, v in frm.items() if k in args}\n try:\n file = Path(file).resolve().relative_to(ROOT).with_suffix('')\n except ValueError:\n file = Path(file).stem\n s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '')\n LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items()))" }, { "identifier": "feature_visualization", "path": "utils/plots.py", "snippet": "def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):\n \"\"\"\n x: Features to be visualized\n module_type: Module type\n stage: Module stage within model\n n: Maximum number of feature maps to plot\n save_dir: Directory to save results\n \"\"\"\n if 'Detect' not in module_type:\n batch, channels, height, width = x.shape # batch, channels, height, width\n if height > 1 and width > 1:\n f = save_dir / f\"stage{stage}_{module_type.split('.')[-1]}_features.png\" # filename\n\n blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels\n n = min(n, channels) # number of plots\n fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols\n ax = ax.ravel()\n plt.subplots_adjust(wspace=0.05, hspace=0.05)\n for i in range(n):\n ax[i].imshow(blocks[i].squeeze()) # cmap='gray'\n ax[i].axis('off')\n\n LOGGER.info(f'Saving {f}... 
({n}/{channels})')\n plt.savefig(f, dpi=300, bbox_inches='tight')\n plt.close()\n np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save" }, { "identifier": "fuse_conv_and_bn", "path": "utils/torch_utils.py", "snippet": "def fuse_conv_and_bn(conv, bn):\n # Fuse Conv2d() and BatchNorm2d() layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/\n fusedconv = nn.Conv2d(conv.in_channels,\n conv.out_channels,\n kernel_size=conv.kernel_size,\n stride=conv.stride,\n padding=conv.padding,\n dilation=conv.dilation,\n groups=conv.groups,\n bias=True).requires_grad_(False).to(conv.weight.device)\n\n # Prepare filters\n w_conv = conv.weight.clone().view(conv.out_channels, -1)\n w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))\n fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))\n\n # Prepare spatial bias\n b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias\n b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))\n fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)\n\n return fusedconv" }, { "identifier": "initialize_weights", "path": "utils/torch_utils.py", "snippet": "def initialize_weights(model):\n for m in model.modules():\n t = type(m)\n if t is nn.Conv2d:\n pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif t is nn.BatchNorm2d:\n m.eps = 1e-3\n m.momentum = 0.03\n elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:\n m.inplace = True" }, { "identifier": "model_info", "path": "utils/torch_utils.py", "snippet": "def model_info(model, verbose=False, imgsz=640):\n # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320]\n n_p = sum(x.numel() for x in model.parameters()) # number parameters\n n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients\n if verbose:\n print(f\"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}\")\n for i, (name, p) in enumerate(model.named_parameters()):\n name = name.replace('module_list.', '')\n print('%5g %40s %9s %12g %20s %10.3g %10.3g' %\n (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))\n\n try: # FLOPs\n p = next(model.parameters())\n stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 # max stride\n im = torch.empty((1, p.shape[1], stride, stride), device=p.device) # input image in BCHW format\n flops = thop.profile(deepcopy(model), inputs=(im,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs\n imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz] # expand if int/float\n fs = f', {flops * imgsz[0] / stride * imgsz[1] / stride:.1f} GFLOPs' # 640x640 GFLOPs\n except Exception:\n fs = ''\n\n name = Path(model.yaml_file).stem.replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model'\n LOGGER.info(f\"{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}\")" }, { "identifier": "profile", "path": "utils/torch_utils.py", "snippet": "def profile(input, ops, n=10, device=None):\n \"\"\" YOLOv5 speed/memory/FLOPs profiler\n Usage:\n input = torch.randn(16, 3, 640, 640)\n m1 = lambda x: x * torch.sigmoid(x)\n m2 = nn.SiLU()\n profile(input, [m1, m2], n=100) # profile over 100 iterations\n \"\"\"\n results = []\n if not isinstance(device, torch.device):\n device = select_device(device)\n print(f\"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem 
(GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}\"\n f\"{'input':>24s}{'output':>24s}\")\n\n for x in input if isinstance(input, list) else [input]:\n x = x.to(device)\n x.requires_grad = True\n for m in ops if isinstance(ops, list) else [ops]:\n m = m.to(device) if hasattr(m, 'to') else m # device\n m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m\n tf, tb, t = 0, 0, [0, 0, 0] # dt forward, backward\n try:\n flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs\n except Exception:\n flops = 0\n\n try:\n for _ in range(n):\n t[0] = time_sync()\n y = m(x)\n t[1] = time_sync()\n try:\n _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward()\n t[2] = time_sync()\n except Exception: # no backward method\n # print(e) # for debug\n t[2] = float('nan')\n tf += (t[1] - t[0]) * 1000 / n # ms per op forward\n tb += (t[2] - t[1]) * 1000 / n # ms per op backward\n mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0 # (GB)\n s_in, s_out = (tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' for x in (x, y)) # shapes\n p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0 # parameters\n print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}')\n results.append([p, flops, mem, tf, tb, s_in, s_out])\n except Exception as e:\n print(e)\n results.append(None)\n torch.cuda.empty_cache()\n return results" }, { "identifier": "scale_img", "path": "utils/torch_utils.py", "snippet": "def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416)\n # Scales img(bs,3,y,x) by ratio constrained to gs-multiple\n if ratio == 1.0:\n return img\n h, w = img.shape[2:]\n s = (int(h * ratio), int(w * ratio)) # new size\n img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize\n if not same_shape: # pad/crop img\n h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w))\n return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=0, newline=True):\n # device = None or 'cpu' or 0 or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} '\n device = str(device).strip().lower().replace('cuda:', '').replace('none', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n mps = device == 'mps' # Apple Metal Performance Shaders (MPS)\n if cpu or mps:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available()\n assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \\\n f\"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)\"\n\n if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 
0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size > 0: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\\n\" # bytes to MB\n arg = 'cuda:0'\n elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available\n s += 'MPS\\n'\n arg = 'mps'\n else: # revert to CPU\n s += 'CPU\\n'\n arg = 'cpu'\n\n if not newline:\n s = s.rstrip()\n LOGGER.info(s)\n return torch.device(arg)" }, { "identifier": "time_sync", "path": "utils/torch_utils.py", "snippet": "def time_sync():\n # PyTorch-accurate time\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n return time.time()" } ]
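The `fuse_conv_and_bn` snippet in the context above folds an eval-mode BatchNorm2d into the preceding Conv2d by rescaling the conv weight and absorbing the BN shift into the bias. As a quick illustration, the sketch below redoes that algebra on toy layer sizes (chosen here, not taken from the record) and checks numerically that the fused layer matches Conv followed by BN:

# Sketch with toy layer sizes (not from the record): the fused conv must
# reproduce bn(conv(x)), up to float32 rounding, in eval mode.
import torch
import torch.nn as nn

conv = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=False).eval()
bn = nn.BatchNorm2d(8).eval()
bn.running_mean.uniform_(-1.0, 1.0)   # give BN non-trivial running statistics
bn.running_var.uniform_(0.5, 1.5)

fused = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=True).eval()
with torch.no_grad():
    # W' = diag(gamma / sqrt(var + eps)) @ W,  b' = beta - gamma * mean / sqrt(var + eps)
    w_bn = torch.diag(bn.weight / torch.sqrt(bn.running_var + bn.eps))
    fused.weight.copy_(torch.mm(w_bn, conv.weight.view(8, -1)).view_as(conv.weight))
    fused.bias.copy_(bn.bias - bn.weight * bn.running_mean / torch.sqrt(bn.running_var + bn.eps))

x = torch.randn(1, 3, 16, 16)
assert torch.allclose(fused(x), bn(conv(x)), atol=1e-5)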
import argparse
import contextlib
import os
import platform
import sys
import thop  # for FLOPs computation
import yaml  # for torch hub
from copy import deepcopy
from pathlib import Path
from models.common import *
from models.experimental import *
from utils.autoanchor import check_anchor_order
from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args
from utils.plots import feature_visualization
from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device, time_sync)
7,849
b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) b.data[:, 5:5 + m.nc] += math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum()) # cls mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) Model = DetectionModel # retain YOLOv5 'Model' class for backwards compatibility class SegmentationModel(DetectionModel): # YOLOv5 segmentation model def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None): super().__init__(cfg, ch, nc, anchors) class ClassificationModel(BaseModel): # YOLOv5 classification model def __init__(self, cfg=None, model=None, nc=1000, cutoff=10): # yaml, model, number of classes, cutoff index super().__init__() self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg) def _from_detection_model(self, model, nc=1000, cutoff=10): # Create a YOLOv5 classification model from a YOLOv5 detection model if isinstance(model, DetectMultiBackend): model = model.model # unwrap DetectMultiBackend model.model = model.model[:cutoff] # backbone m = model.model[-1] # last layer ch = m.conv.in_channels if hasattr(m, 'conv') else m.cv1.conv.in_channels # ch into module c = Classify(ch, nc) # Classify() c.i, c.f, c.type = m.i, m.f, 'models.common.Classify' # index, from, type model.model[-1] = c # replace self.model = model.model self.stride = model.stride self.save = [] self.nc = nc def _from_yaml(self, cfg): # Create a YOLOv5 classification model from a *.yaml file self.model = None def parse_model(d, ch): # model_dict, input_channels(3) # Parse a YOLOv5 model.yaml dictionary LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation') if act: Conv.default_act = eval(act) # redefine default activation, i.e. 
Conv.default_act = nn.SiLU() LOGGER.info(f"{colorstr('activation:')} {act}") # print na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors no = na * (nc + 5) # number of outputs = anchors * (classes + 5) layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args m = eval(m) if isinstance(m, str) else m # eval strings for j, a in enumerate(args): with contextlib.suppress(NameError): args[j] = eval(a) if isinstance(a, str) else a # eval strings n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain if m in { Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x, DownSample}: c1, c2 = ch[f], args[0] if c2 != no: # if not output c2 = make_divisible(c2 * gw, 8) args = [c1, c2, *args[1:]] if m in {BottleneckCSP, C3, C3TR, C3Ghost, C3x}: args.insert(2, n) # number of repeats n = 1 elif m is nn.BatchNorm2d: args = [ch[f]] elif m is Concat: c2 = sum(ch[x] for x in f) elif m is ScalSeq: c2 = args[0] elif m is Add: c2 = args[0] elif m is Zoom_cat: c2 = 3*args[0] elif m is attention_model: c2 = args[0] # TODO: channel, gw, gd elif m in {Detect, Segment}: args.append([ch[x] for x in f]) if isinstance(args[1], int): # number of anchors args[1] = [list(range(args[1] * 2))] * len(f) if m is Segment: args[3] = make_divisible(args[3] * gw, 8) elif m is Contract: c2 = ch[f] * args[0] ** 2 elif m is Expand: c2 = ch[f] // args[0] ** 2 else: c2 = ch[f] m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module t = str(m)[8:-2].replace('__main__.', '') # module type np = sum(x.numel() for x in m_.parameters()) # number params m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}') # print save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist layers.append(m_) if i == 0: ch = [] ch.append(c2) return nn.Sequential(*layers), sorted(save) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml') parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all GPUs') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--profile', action='store_true', help='profile model speed') parser.add_argument('--line-profile', action='store_true', help='profile model speed layer by layer') parser.add_argument('--test', action='store_true', help='test all yolo*.yaml') opt = parser.parse_args() opt.cfg = check_yaml(opt.cfg) # check YAML print_args(vars(opt))
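In `parse_model` above, every non-output channel count is scaled by the width multiple `gw` and passed through `make_divisible(c2 * gw, 8)`. A small worked example follows; the channel values are hypothetical and `make_divisible` is re-implemented locally under the assumption that it rounds up to the nearest multiple of the divisor:

# Illustrative only: assumed rounding behaviour of make_divisible, hypothetical channel counts.
import math

def make_divisible(x, divisor=8):
    # assumed: round the scaled channel count up to the nearest multiple of `divisor`
    return math.ceil(x / divisor) * divisor

gw = 0.75                                       # e.g. a yolov5m-style width_multiple
for c2 in (64, 100, 256):                       # hypothetical per-layer output channels
    print(c2, "->", make_divisible(c2 * gw))    # 64 -> 48, 100 -> 80, 256 -> 192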
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ YOLO-specific modules Usage: $ python models/yolo.py --cfg yolov5s.yaml """ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH if platform.system() != 'Windows': ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative try: except ImportError: thop = None class Detect(nn.Module): # YOLOv5 Detect head for detection models stride = None # strides computed during build dynamic = False # force grid reconstruction export = False # export mode def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer super().__init__() self.nc = nc # number of classes self.no = nc + 5 # number of outputs per anchor self.nl = len(anchors) # number of detection layers self.na = len(anchors[0]) // 2 # number of anchors self.grid = [torch.empty(0) for _ in range(self.nl)] # init grid self.anchor_grid = [torch.empty(0) for _ in range(self.nl)] # init anchor grid self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv self.inplace = inplace # use inplace ops (e.g. slice assignment) def forward(self, x): z = [] # inference output for i in range(self.nl): x[i] = self.m[i](x[i]) # conv bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() if not self.training: # inference if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) if isinstance(self, Segment): # (boxes + masks) xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4) xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i] # xy wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i] # wh y = torch.cat((xy, wh, conf.sigmoid(), mask), 4) else: # Detect (boxes only) xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4) xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh y = torch.cat((xy, wh, conf), 4) z.append(y.view(bs, self.na * nx * ny, self.no)) return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x) def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')): d = self.anchors[i].device t = self.anchors[i].dtype shape = 1, self.na, ny, nx, 2 # grid shape y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t) yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x) # torch>=0.7 compatibility grid = torch.stack((xv, yv), 2).expand(shape) - 0.5 # add grid offset, i.e. 
y = 2.0 * x - 0.5 anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape) return grid, anchor_grid class Segment(Detect): # YOLOv5 Segment head for segmentation models def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True): super().__init__(nc, anchors, ch, inplace) self.nm = nm # number of masks self.npr = npr # number of protos self.no = 5 + nc + self.nm # number of outputs per anchor self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv self.proto = Proto(ch[0], self.npr, self.nm) # protos self.detect = Detect.forward def forward(self, x): p = self.proto(x[0]) x = self.detect(self, x) return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1]) class BaseModel(nn.Module): # YOLOv5 base model def forward(self, x, profile=False, visualize=False): return self._forward_once(x, profile, visualize) # single-scale inference, train def _forward_once(self, x, profile=False, visualize=False): y, dt = [], [] # outputs for m in self.model: if m.f != -1: # if not from previous layer x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers if profile: self._profile_one_layer(m, x, dt) x = m(x) # run y.append(x if m.i in self.save else None) # save output if visualize: feature_visualization(x, m.type, m.i, save_dir=visualize) return x def _profile_one_layer(self, m, x, dt): c = m == self.model[-1] # is final layer, copy input as inplace fix o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs t = time_sync() for _ in range(10): m(x.copy() if c else x) dt.append((time_sync() - t) * 100) if m == self.model[0]: LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module") LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') if c: LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total") def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers LOGGER.info('Fusing layers... 
') for m in self.model.modules(): if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'): m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv delattr(m, 'bn') # remove batchnorm m.forward = m.forward_fuse # update forward self.info() return self def info(self, verbose=False, img_size=640): # print model information model_info(self, verbose, img_size) def _apply(self, fn): # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers self = super()._apply(fn) m = self.model[-1] # Detect() if isinstance(m, (Detect, Segment)): m.stride = fn(m.stride) m.grid = list(map(fn, m.grid)) if isinstance(m.anchor_grid, list): m.anchor_grid = list(map(fn, m.anchor_grid)) return self class DetectionModel(BaseModel): # YOLOv5 detection model def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes super().__init__() if isinstance(cfg, dict): self.yaml = cfg # model dict else: # is *.yaml self.yaml_file = Path(cfg).name with open(cfg, encoding='ascii', errors='ignore') as f: self.yaml = yaml.safe_load(f) # model dict # Define model ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels if nc and nc != self.yaml['nc']: LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") self.yaml['nc'] = nc # override yaml value if anchors: LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}') self.yaml['anchors'] = round(anchors) # override yaml value self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist self.names = [str(i) for i in range(self.yaml['nc'])] # default names self.inplace = self.yaml.get('inplace', True) # Build strides, anchors m = self.model[-1] # Detect() if isinstance(m, (Detect, Segment)): s = 256 # 2x min stride m.inplace = self.inplace forward = lambda x: self.forward(x)[0] if isinstance(m, Segment) else self.forward(x) m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))]) # forward check_anchor_order(m) m.anchors /= m.stride.view(-1, 1, 1) self.stride = m.stride self._initialize_biases() # only run once # Init weights, biases initialize_weights(self) self.info() LOGGER.info('') def forward(self, x, augment=False, profile=False, visualize=False): if augment: return self._forward_augment(x) # augmented inference, None return self._forward_once(x, profile, visualize) # single-scale inference, train def _forward_augment(self, x): img_size = x.shape[-2:] # height, width s = [1, 0.83, 0.67] # scales f = [None, 3, None] # flips (2-ud, 3-lr) y = [] # outputs for si, fi in zip(s, f): xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) yi = self._forward_once(xi)[0] # forward # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save yi = self._descale_pred(yi, fi, si, img_size) y.append(yi) y = self._clip_augmented(y) # clip augmented tails return torch.cat(y, 1), None # augmented inference, train def _descale_pred(self, p, flips, scale, img_size): # de-scale predictions following augmented inference (inverse operation) if self.inplace: p[..., :4] /= scale # de-scale if flips == 2: p[..., 1] = img_size[0] - p[..., 1] # de-flip ud elif flips == 3: p[..., 0] = img_size[1] - p[..., 0] # de-flip lr else: x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale # de-scale if flips == 2: y = img_size[0] - y # de-flip ud elif flips == 3: x = img_size[1] - x # de-flip lr p = torch.cat((x, y, wh, p[..., 4:]), -1) return p def _clip_augmented(self, y): 
# Clip YOLOv5 augmented inference tails nl = self.model[-1].nl # number of detection layers (P3-P5) g = sum(4 ** x for x in range(nl)) # grid points e = 1 # exclude layer count i = (y[0].shape[1] // g) * sum(4 ** x for x in range(e)) # indices y[0] = y[0][:, :-i] # large i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e)) # indices y[-1] = y[-1][:, i:] # small return y def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency # https://arxiv.org/abs/1708.02002 section 3.3 # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. m = self.model[-1] # Detect() module for mi, s in zip(m.m, m.stride): # from b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) b.data[:, 5:5 + m.nc] += math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum()) # cls mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) Model = DetectionModel # retain YOLOv5 'Model' class for backwards compatibility class SegmentationModel(DetectionModel): # YOLOv5 segmentation model def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None): super().__init__(cfg, ch, nc, anchors) class ClassificationModel(BaseModel): # YOLOv5 classification model def __init__(self, cfg=None, model=None, nc=1000, cutoff=10): # yaml, model, number of classes, cutoff index super().__init__() self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg) def _from_detection_model(self, model, nc=1000, cutoff=10): # Create a YOLOv5 classification model from a YOLOv5 detection model if isinstance(model, DetectMultiBackend): model = model.model # unwrap DetectMultiBackend model.model = model.model[:cutoff] # backbone m = model.model[-1] # last layer ch = m.conv.in_channels if hasattr(m, 'conv') else m.cv1.conv.in_channels # ch into module c = Classify(ch, nc) # Classify() c.i, c.f, c.type = m.i, m.f, 'models.common.Classify' # index, from, type model.model[-1] = c # replace self.model = model.model self.stride = model.stride self.save = [] self.nc = nc def _from_yaml(self, cfg): # Create a YOLOv5 classification model from a *.yaml file self.model = None def parse_model(d, ch): # model_dict, input_channels(3) # Parse a YOLOv5 model.yaml dictionary LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation') if act: Conv.default_act = eval(act) # redefine default activation, i.e. 
Conv.default_act = nn.SiLU() LOGGER.info(f"{colorstr('activation:')} {act}") # print na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors no = na * (nc + 5) # number of outputs = anchors * (classes + 5) layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args m = eval(m) if isinstance(m, str) else m # eval strings for j, a in enumerate(args): with contextlib.suppress(NameError): args[j] = eval(a) if isinstance(a, str) else a # eval strings n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain if m in { Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x, DownSample}: c1, c2 = ch[f], args[0] if c2 != no: # if not output c2 = make_divisible(c2 * gw, 8) args = [c1, c2, *args[1:]] if m in {BottleneckCSP, C3, C3TR, C3Ghost, C3x}: args.insert(2, n) # number of repeats n = 1 elif m is nn.BatchNorm2d: args = [ch[f]] elif m is Concat: c2 = sum(ch[x] for x in f) elif m is ScalSeq: c2 = args[0] elif m is Add: c2 = args[0] elif m is Zoom_cat: c2 = 3*args[0] elif m is attention_model: c2 = args[0] # TODO: channel, gw, gd elif m in {Detect, Segment}: args.append([ch[x] for x in f]) if isinstance(args[1], int): # number of anchors args[1] = [list(range(args[1] * 2))] * len(f) if m is Segment: args[3] = make_divisible(args[3] * gw, 8) elif m is Contract: c2 = ch[f] * args[0] ** 2 elif m is Expand: c2 = ch[f] // args[0] ** 2 else: c2 = ch[f] m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module t = str(m)[8:-2].replace('__main__.', '') # module type np = sum(x.numel() for x in m_.parameters()) # number params m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}') # print save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist layers.append(m_) if i == 0: ch = [] ch.append(c2) return nn.Sequential(*layers), sorted(save) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml') parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all GPUs') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--profile', action='store_true', help='profile model speed') parser.add_argument('--line-profile', action='store_true', help='profile model speed layer by layer') parser.add_argument('--test', action='store_true', help='test all yolo*.yaml') opt = parser.parse_args() opt.cfg = check_yaml(opt.cfg) # check YAML print_args(vars(opt))
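The inference branch of `Detect.forward` above decodes raw head outputs as `xy = (sigmoid * 2 + grid) * stride` and `wh = (sigmoid * 2) ** 2 * anchor_grid`, with `_make_grid` storing cell indices offset by -0.5. A tiny numeric sketch (stride, anchor and cell index are illustrative values, not taken from a real model):

# Sketch: a zero raw output decodes to the centre of its grid cell and to the anchor size.
import torch

stride = 8.0
anchor = torch.tensor([10.0, 13.0])           # hypothetical anchor (w, h) in pixels
grid_xy = torch.tensor([4.0, 7.0]) - 0.5      # _make_grid stores the cell index minus 0.5
raw = torch.zeros(4)                          # raw outputs for (x, y, w, h); sigmoid(0) = 0.5

xy = (raw[:2].sigmoid() * 2 + grid_xy) * stride   # tensor([36., 60.]) -> centre of cell (4, 7)
wh = (raw[2:].sigmoid() * 2) ** 2 * anchor        # tensor([10., 13.]) -> the anchor itself
print(xy, wh)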
device = select_device(opt.device)
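The single line above is the gold continuation of the truncated script shown before it. A minimal sketch of how such a pair is typically scored for next-line completion; the `exact_match` helper is a hypothetical convenience, not part of the dataset:

def exact_match(prediction: str, gold: str) -> bool:
    # whitespace-insensitive comparison of a predicted line against the gold line
    return prediction.strip() == gold.strip()

gold = "device = select_device(opt.device)"                           # gold continuation shown above
print(exact_match("    device = select_device(opt.device)", gold))    # True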
12
2023-12-10 14:18:29+00:00
12k
ylacombe/finetune-hf-vits
convert_original_discriminator_checkpoint.py
[ { "identifier": "VitsFeatureExtractor", "path": "utils/feature_extraction_vits.py", "snippet": "class VitsFeatureExtractor(SequenceFeatureExtractor):\n r\"\"\"\n Constructs a Vits feature extractor.\n\n This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains\n most of the main methods. Users should refer to this superclass for more information regarding those methods.\n\n This class extracts `Short Time Fourier Transform` from raw speech using a custom numpy implementation which should\n match pytorch's `torch.stft`.\n\n Args:\n feature_size (`int`, defaults to 80):\n The feature dimension of the extracted features.\n sampling_rate (`int`, defaults to 22050):\n The sampling rate at which the audio files should be digitalized expressed in hertz (Hz).\n hop_length (`int`, defaults to 256):\n Length of the overlaping windows for the STFT used to obtain the Mel Frequency coefficients.\n n_fft (`int`, defaults to 1024):\n Size of the Fourier transform.\n padding_value (`float`, *optional*, defaults to 0.0):\n Padding value used to pad the audio. Should correspond to silences.\n return_attention_mask (`bool`, *optional*, defaults to `False`):\n Whether to return the attention mask.\n\n [What are attention masks?](../glossary#attention-mask)\n\n <Tip>\n\n For Vits finetuning, `attention_mask` should always be passed for batched inference, to avoid subtle bugs.\n\n </Tip>\n\n max_wav_value (`float`, defaults to 32768.0):\n Maximum wav value. Used to normalize the input waveforms if `do_normalize=True` in the forward pass of this\n feature extractor.\n \"\"\"\n\n model_input_names = [\"input_features\"]\n\n def __init__(\n self,\n feature_size=80,\n sampling_rate=22050,\n hop_length=256,\n n_fft=1024,\n padding_value=0.0,\n return_attention_mask=False, # pad inputs to max length with silence token (zero) and no attention mask,\n max_wav_value=32768.0,\n **kwargs,\n ):\n super().__init__(\n feature_size=feature_size,\n sampling_rate=sampling_rate,\n padding_value=padding_value,\n return_attention_mask=return_attention_mask,\n **kwargs,\n )\n self.n_fft = n_fft\n self.hop_length = hop_length\n self.sampling_rate = sampling_rate\n self.mel_filters = mel_filter_bank(\n num_frequency_bins=1 + n_fft // 2,\n num_mel_filters=feature_size,\n min_frequency=0.0,\n max_frequency=sampling_rate // 2,\n sampling_rate=sampling_rate,\n norm=\"slaney\",\n mel_scale=\"slaney\",\n )\n self.max_wav_value = max_wav_value\n\n def _torch_extract_fbank_features(self, waveform: np.array) -> Tuple[torch.Tensor]:\n \"\"\"\n Compute the log-mel spectrogram of the provided audio using the PyTorch STFT implementation.\n \"\"\"\n if len(waveform.shape) == 1:\n waveform = waveform.unsqueeze(0)\n\n waveform = torch.nn.functional.pad(\n waveform,\n (int((self.n_fft - self.hop_length) / 2), int((self.n_fft - self.hop_length) / 2)),\n mode=\"reflect\",\n )\n\n window = torch.hann_window(self.n_fft).to(waveform.device)\n stft = torch.stft(\n waveform,\n self.n_fft,\n hop_length=self.hop_length,\n win_length=self.n_fft,\n window=window,\n center=False,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=False,\n )\n magnitudes = torch.sqrt(stft.pow(2).sum(-1) + 1e-6)\n\n mel_filters = torch.from_numpy(self.mel_filters).type(torch.float32).to(waveform.device)\n mel_spec = mel_filters.T @ magnitudes\n\n log_spec = torch.clamp(mel_spec, min=1e-5).log()\n return magnitudes, log_spec\n\n def __call__(\n self,\n raw_speech: Union[np.ndarray, List[float], 
List[np.ndarray], List[List[float]]],\n truncation: bool = False,\n pad_to_multiple_of: Optional[int] = None,\n return_tensors: Optional[Union[str, TensorType]] = None,\n return_attention_mask: Optional[bool] = True,\n padding: Optional[str] = True,\n max_length: Optional[int] = None,\n sampling_rate: Optional[int] = None,\n do_normalize: Optional[bool] = None,\n **kwargs,\n ) -> BatchFeature:\n \"\"\"\n Main method to featurize and prepare for the model one or several sequence(s).\n\n Args:\n raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):\n The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float\n values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not\n stereo, i.e. single float per timestep.\n truncation (`bool`, *optional*, default to `False`):\n Activates truncation to cut input sequences longer than *max_length* to *max_length*.\n pad_to_multiple_of (`int`, *optional*, defaults to None):\n If set will pad the sequence to a multiple of the provided value.\n\n This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability\n `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*, defaults to `True`):\n Whether to return the attention mask. If left to the default, will return the attention mask according\n to the specific feature_extractor's default.\n\n [What are attention masks?](../glossary#attention-mask)\n\n <Tip>\n\n For Vits finetuning, `attention_mask` should always be passed for batched inference, to avoid subtle\n bugs.\n\n </Tip>\n\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):\n Select a strategy to pad the returned sequences (according to the model's padding side and padding\n index) among:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single\n sequence if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n max_length (`int`, *optional*):\n Maximum length of the returned list and optionally padding length (see above).\n sampling_rate (`int`, *optional*):\n The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass\n `sampling_rate` at the forward call to prevent silent errors and allow automatic speech recognition\n pipeline.\n do_normalize (`bool`, *optional*):\n Whether or not to divide the input waveform by `self.max_wav_value`.\n \"\"\"\n\n if sampling_rate is not None:\n if sampling_rate != self.sampling_rate:\n raise ValueError(\n f\"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a\"\n f\" sampling rate of {self.sampling_rate}. 
Please make sure that the provided `raw_speech` input\"\n f\" was sampled with {self.sampling_rate} and not {sampling_rate}.\"\n )\n else:\n logger.warning(\n \"It is strongly recommended to pass the `sampling_rate` argument to this function. \"\n \"Failing to do so can result in silent errors that might be hard to debug.\"\n )\n\n is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1\n if is_batched_numpy and len(raw_speech.shape) > 2:\n raise ValueError(f\"Only mono-channel audio is supported for input to {self}\")\n is_batched = is_batched_numpy or (\n isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))\n )\n\n if is_batched:\n raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]\n elif not is_batched and not isinstance(raw_speech, np.ndarray):\n raw_speech = np.asarray(raw_speech, dtype=np.float32)\n elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):\n raw_speech = raw_speech.astype(np.float32)\n\n # always return batch\n if not is_batched:\n raw_speech = [np.asarray([raw_speech]).T]\n\n if self.max_wav_value is not None and do_normalize:\n raw_speech = [\n speech if self.max_wav_value is None else speech / self.max_wav_value for speech in raw_speech\n ]\n\n batched_speech = BatchFeature({\"input_features\": raw_speech})\n\n # convert into correct format for padding\n padded_inputs = self.pad(\n batched_speech,\n padding=padding,\n max_length=max_length,\n truncation=truncation,\n pad_to_multiple_of=pad_to_multiple_of,\n return_attention_mask=return_attention_mask or do_normalize,\n return_tensors=\"pt\",\n )\n\n # make sure list is in array format\n if isinstance(padded_inputs.get(\"input_features\"),list):\n input_features = torch.tensor(padded_inputs.get(\"input_features\")).transpose(1, 2).transpose(0, 1)\n else:\n input_features = padded_inputs.get(\"input_features\").clone().detach().transpose(1, 2).transpose(0, 1)\n\n\n input_features = self._torch_extract_fbank_features(input_features[0])\n\n mel_scaled_input_features = input_features[1]\n input_features = input_features[0]\n\n padded_inputs[\"input_features\"] = input_features\n padded_inputs[\"mel_scaled_input_features\"] = mel_scaled_input_features\n\n if return_attention_mask:\n # rescale from sample (48000) to feature (3000)\n padded_inputs[\"attention_mask\"] = padded_inputs[\"attention_mask\"][:, :: self.hop_length]\n\n if return_tensors is not None:\n padded_inputs = padded_inputs.convert_to_tensors(return_tensors)\n\n return padded_inputs\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes this instance to a Python dictionary.\n\n Returns:\n `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.\n \"\"\"\n output = copy.deepcopy(self.__dict__)\n output[\"feature_extractor_type\"] = self.__class__.__name__\n if \"mel_filters\" in output:\n del output[\"mel_filters\"]\n return output" }, { "identifier": "VitsConfig", "path": "utils/configuration_vits.py", "snippet": "VITS_PRETRAINED_CONFIG_ARCHIVE_MAP = {\n \"facebook/mms-tts-eng\": \"https://huggingface.co/facebook/mms-tts-eng/resolve/main/config.json\",\n}\nclass VitsConfig(PretrainedConfig):\n def __init__(\n self,\n vocab_size=38,\n hidden_size=192,\n num_hidden_layers=6,\n num_attention_heads=2,\n window_size=4,\n use_bias=True,\n ffn_dim=768,\n layerdrop=0.1,\n ffn_kernel_size=3,\n flow_size=192,\n spectrogram_bins=513,\n hidden_act=\"relu\",\n hidden_dropout=0.1,\n 
attention_dropout=0.1,\n activation_dropout=0.1,\n initializer_range=0.02,\n layer_norm_eps=1e-5,\n use_stochastic_duration_prediction=True,\n num_speakers=1,\n speaker_embedding_size=0,\n upsample_initial_channel=512,\n upsample_rates=[8, 8, 2, 2],\n upsample_kernel_sizes=[16, 16, 4, 4],\n resblock_kernel_sizes=[3, 7, 11],\n resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],\n leaky_relu_slope=0.1,\n depth_separable_channels=2,\n depth_separable_num_layers=3,\n duration_predictor_flow_bins=10,\n duration_predictor_tail_bound=5.0,\n duration_predictor_kernel_size=3,\n duration_predictor_dropout=0.5,\n duration_predictor_num_flows=4,\n duration_predictor_filter_channels=256,\n prior_encoder_num_flows=4,\n prior_encoder_num_wavenet_layers=4,\n posterior_encoder_num_wavenet_layers=16,\n wavenet_kernel_size=5,\n wavenet_dilation_rate=1,\n wavenet_dropout=0.0,\n speaking_rate=1.0,\n noise_scale=0.667,\n noise_scale_duration=0.8,\n sampling_rate=16_000,\n discriminator_kernel_size=5,\n discriminator_stride=3,\n discriminator_periods=[2, 3, 5, 7, 11],\n discriminator_period_channels=[1, 32, 128, 512, 1024],\n discriminator_scale_channels=[1, 16, 64, 256, 1024],\n segment_size=8192,\n hop_length=256,\n **kwargs,\n ):" }, { "identifier": "VitsDiscriminator", "path": "utils/modeling_vits_training.py", "snippet": "class VitsDiscriminator(VitsPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n if config.discriminator_scale_channels is not None:\n self.discriminators = nn.ModuleList(\n [VitsHifiGanDiscriminatorScaleResidualBlock(config.discriminator_scale_channels, config.leaky_relu_slope)]\n )\n else:\n self.discriminators = nn.ModuleList([])\n \n self.discriminators.extend(\n [\n VitsHifiGanDiscriminatorPeriodResidualBlock(\n config.discriminator_period_channels,\n period,\n config.discriminator_kernel_size,\n config.discriminator_stride,\n config.leaky_relu_slope,\n )\n for period in config.discriminator_periods\n ]\n )\n\n def forward(self, hidden_states):\n fmaps = []\n discriminated_hidden_states_list = []\n\n for discriminator in self.discriminators:\n discriminated_hidden_states, fmap = discriminator(hidden_states)\n fmaps.append(fmap)\n discriminated_hidden_states_list.append(discriminated_hidden_states)\n\n return discriminated_hidden_states_list, fmaps\n\n def apply_weight_norm(self):\n for disc in self.discriminators:\n disc.apply_weight_norm()\n\n def remove_weight_norm(self):\n for disc in self.discriminators:\n disc.remove_weight_norm()" }, { "identifier": "VitsModelForPreTraining", "path": "utils/modeling_vits_training.py", "snippet": "class VitsModelForPreTraining(VitsPreTrainedModel):\n def __init__(self, config: VitsConfig):\n super().__init__(config)\n self.config = config\n self.text_encoder = VitsTextEncoder(config)\n self.flow = VitsResidualCouplingBlock(config)\n self.decoder = VitsHifiGan(config)\n\n if config.use_stochastic_duration_prediction:\n self.duration_predictor = VitsStochasticDurationPredictor(config)\n else:\n self.duration_predictor = VitsDurationPredictor(config)\n\n if config.num_speakers > 1:\n self.embed_speaker = nn.Embedding(config.num_speakers, config.speaker_embedding_size)\n\n # This is used only for training.\n self.posterior_encoder = VitsPosteriorEncoder(config)\n self.discriminator = VitsDiscriminator(config)\n\n # These parameters control the synthesised speech properties\n self.speaking_rate = config.speaking_rate\n self.noise_scale = config.noise_scale\n self.noise_scale_duration = config.noise_scale_duration\n 
self.segment_size = self.config.segment_size // self.config.hop_length\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def resize_speaker_embeddings(\n self,\n new_num_speakers: int,\n speaker_embedding_size: Optional[int] = None,\n pad_to_multiple_of: Optional[int] = 2,\n ):\n if pad_to_multiple_of is not None:\n new_num_speakers = ((new_num_speakers + pad_to_multiple_of - 1) // pad_to_multiple_of) * pad_to_multiple_of\n\n # first, take care of embed_speaker\n if self.config.num_speakers <= 1:\n if speaker_embedding_size is None:\n raise ValueError(\n \"The current model had no previous speaker embedding, but `speaker_embedding_size` is not specified. Pass `speaker_embedding_size` to this method.\"\n )\n # create new embedding layer\n new_embeddings = nn.Embedding(\n new_num_speakers,\n speaker_embedding_size,\n device=self.device,\n )\n # initialize all new embeddings\n self._init_weights(new_embeddings)\n else:\n new_embeddings = self._get_resized_embeddings(self.embed_speaker, new_num_speakers)\n\n self.embed_speaker = new_embeddings\n\n # then take care of sub-models\n self.flow.resize_speaker_embeddings(speaker_embedding_size)\n for flow in self.flow.flows:\n self._init_weights(flow.wavenet.cond_layer)\n\n self.decoder.resize_speaker_embedding(speaker_embedding_size)\n self._init_weights(self.decoder.cond)\n\n self.duration_predictor.resize_speaker_embeddings(speaker_embedding_size)\n self._init_weights(self.duration_predictor.cond)\n\n self.posterior_encoder.resize_speaker_embeddings(speaker_embedding_size)\n self._init_weights(self.posterior_encoder.wavenet.cond_layer)\n\n self.config.num_speakers = new_num_speakers\n self.config.speaker_embedding_size = speaker_embedding_size\n\n def get_input_embeddings(self):\n return self.text_encoder.get_input_embeddings()\n\n def set_input_embeddings(self, value):\n self.text_encoder.set_input_embeddings(value)\n\n def apply_weight_norm(self):\n self.decoder.apply_weight_norm()\n self.flow.apply_weight_norm()\n self.posterior_encoder.apply_weight_norm()\n\n def remove_weight_norm(self):\n self.decoder.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.posterior_encoder.remove_weight_norm()\n\n def discriminate(self, hidden_states):\n return self.discriminator(hidden_states)\n\n def get_encoder(self):\n return self.text_encoder\n\n def _inference_forward(\n self,\n input_ids: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n speaker_embeddings: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n padding_mask: Optional[torch.Tensor] = None,\n ):\n text_encoder_output = self.text_encoder(\n input_ids=input_ids,\n padding_mask=padding_mask,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n hidden_states = text_encoder_output[0] if not return_dict else text_encoder_output.last_hidden_state\n hidden_states = hidden_states.transpose(1, 2)\n input_padding_mask = padding_mask.transpose(1, 2)\n prior_means = text_encoder_output[1] if not return_dict else text_encoder_output.prior_means\n prior_log_variances = text_encoder_output[2] if not return_dict else text_encoder_output.prior_log_variances\n\n if self.config.use_stochastic_duration_prediction:\n log_duration = self.duration_predictor(\n hidden_states,\n input_padding_mask,\n speaker_embeddings,\n reverse=True,\n 
noise_scale=self.noise_scale_duration,\n )\n else:\n log_duration = self.duration_predictor(hidden_states, input_padding_mask, speaker_embeddings)\n\n length_scale = 1.0 / self.speaking_rate\n duration = torch.ceil(torch.exp(log_duration) * input_padding_mask * length_scale)\n predicted_lengths = torch.clamp_min(torch.sum(duration, [1, 2]), 1).long()\n\n # Create a padding mask for the output lengths of shape (batch, 1, max_output_length)\n indices = torch.arange(predicted_lengths.max(), dtype=predicted_lengths.dtype, device=predicted_lengths.device)\n output_padding_mask = indices.unsqueeze(0) < predicted_lengths.unsqueeze(1)\n output_padding_mask = output_padding_mask.unsqueeze(1).to(input_padding_mask.dtype)\n\n # Reconstruct an attention tensor of shape (batch, 1, out_length, in_length)\n attn_mask = torch.unsqueeze(input_padding_mask, 2) * torch.unsqueeze(output_padding_mask, -1)\n batch_size, _, output_length, input_length = attn_mask.shape\n cum_duration = torch.cumsum(duration, -1).view(batch_size * input_length, 1)\n indices = torch.arange(output_length, dtype=duration.dtype, device=duration.device)\n valid_indices = indices.unsqueeze(0) < cum_duration\n valid_indices = valid_indices.to(attn_mask.dtype).view(batch_size, input_length, output_length)\n padded_indices = valid_indices - nn.functional.pad(valid_indices, [0, 0, 1, 0, 0, 0])[:, :-1]\n attn = padded_indices.unsqueeze(1).transpose(2, 3) * attn_mask\n\n # Expand prior distribution\n prior_means = torch.matmul(attn.squeeze(1), prior_means).transpose(1, 2)\n prior_log_variances = torch.matmul(attn.squeeze(1), prior_log_variances).transpose(1, 2)\n\n prior_latents = prior_means + torch.randn_like(prior_means) * torch.exp(prior_log_variances) * self.noise_scale\n latents = self.flow(prior_latents, output_padding_mask, speaker_embeddings, reverse=True)\n\n spectrogram = latents * output_padding_mask\n waveform = self.decoder(spectrogram, speaker_embeddings)\n waveform = waveform.squeeze(1)\n sequence_lengths = predicted_lengths * np.prod(self.config.upsample_rates)\n\n if not return_dict:\n outputs = (waveform, sequence_lengths, spectrogram) + text_encoder_output[3:]\n return outputs\n\n return VitsModelOutput(\n waveform=waveform,\n sequence_lengths=sequence_lengths,\n spectrogram=spectrogram,\n hidden_states=text_encoder_output.hidden_states,\n attentions=text_encoder_output.attentions,\n )\n\n @add_start_docstrings_to_model_forward(VITS_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=VitsModelOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n speaker_id: Optional[int] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: Optional[torch.FloatTensor] = None,\n labels_attention_mask: Optional[torch.Tensor] = None,\n monotonic_alignment_function: Optional[Callable] = None,\n ) -> Union[Tuple[Any], VitsModelOutput]:\n r\"\"\"\n labels (`torch.FloatTensor` of shape `(batch_size, config.spectrogram_bins, sequence_length)`, *optional*):\n Float values of target spectrogram.\n labels_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing convolution and attention on labels. 
Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n monotonic_alignment_function (`Callable`, *optional*):\n Monotonic alignment function. Used for training, i.e when `labels` are provided. By default, it will use a\n Pytorch implementation of the monotonic alignment function which is awfully slow. An alternative relying on\n cython is proposed in examples/pytorch/text-to-speech/run_vits_finetuning.py\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import VitsTokenizer, VitsModelForPreTraining, set_seed\n >>> import torch\n\n >>> tokenizer = VitsTokenizer.from_pretrained(\"facebook/mms-tts-eng\")\n >>> model = VitsModelForPreTraining.from_pretrained(\"facebook/mms-tts-eng\")\n\n >>> inputs = tokenizer(text=\"Hello - my dog is cute\", return_tensors=\"pt\")\n\n >>> set_seed(555) # make deterministic\n\n >>> with torch.no_grad():\n ... outputs = model(inputs[\"input_ids\"])\n >>> outputs.waveform.shape\n torch.Size([1, 45824])\n ```\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n monotonic_alignment_function = (\n monotonic_align_max_path if monotonic_alignment_function is None else monotonic_alignment_function\n )\n\n if attention_mask is not None:\n input_padding_mask = attention_mask.unsqueeze(-1).float()\n else:\n input_padding_mask = torch.ones_like(input_ids).unsqueeze(-1).float()\n\n if self.config.num_speakers > 1 and speaker_id is not None:\n if isinstance(speaker_id, int):\n speaker_id = torch.full(size=(1,), fill_value=speaker_id, device=self.device)\n elif isinstance(speaker_id, (list, tuple, np.ndarray)):\n speaker_id = torch.tensor(speaker_id, device=self.device)\n\n if not ((0 <= speaker_id).all() and (speaker_id < self.config.num_speakers).all()).item():\n raise ValueError(f\"Set `speaker_id` in the range 0-{self.config.num_speakers - 1}.\")\n if not (len(speaker_id) == 1 or len(speaker_id == len(input_ids))):\n raise ValueError(\n f\"You passed {len(speaker_id)} `speaker_id` but you should either pass one speaker id or `batch_size` `speaker_id`.\"\n )\n\n speaker_embeddings = self.embed_speaker(speaker_id).unsqueeze(-1)\n else:\n speaker_embeddings = None\n\n # if inference, return inference forward of VitsModel\n if labels is None:\n return self._inference_forward(\n input_ids,\n attention_mask,\n speaker_embeddings,\n output_attentions,\n output_hidden_states,\n return_dict,\n input_padding_mask,\n )\n\n if labels_attention_mask is not None:\n labels_padding_mask = labels_attention_mask.unsqueeze(1).float()\n else:\n labels_attention_mask = torch.ones((labels.shape[0], labels.shape[2])).float().to(self.device)\n labels_padding_mask = labels_attention_mask.unsqueeze(1)\n\n text_encoder_output = self.text_encoder(\n input_ids=input_ids,\n padding_mask=input_padding_mask,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n hidden_states = text_encoder_output[0] if not return_dict else text_encoder_output.last_hidden_state\n hidden_states = hidden_states.transpose(1, 2)\n input_padding_mask = input_padding_mask.transpose(1, 2)\n prior_means = 
text_encoder_output[1] if not return_dict else text_encoder_output.prior_means\n prior_log_variances = text_encoder_output[2] if not return_dict else text_encoder_output.prior_log_variances\n\n latents, posterior_means, posterior_log_variances = self.posterior_encoder(\n labels, labels_padding_mask, speaker_embeddings\n )\n prior_latents = self.flow(latents, labels_padding_mask, speaker_embeddings, reverse=False)\n\n prior_means, prior_log_variances = prior_means.transpose(1, 2), prior_log_variances.transpose(1, 2)\n with torch.no_grad():\n # negative cross-entropy\n\n # [batch_size, d, latent_length]\n prior_variances = torch.exp(-2 * prior_log_variances)\n # [batch_size, 1, latent_length]\n neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - prior_log_variances, [1], keepdim=True)\n # [batch_size, text_length, d] x [batch_size, d, latent_length] = [batch_size, text_length, latent_length]\n neg_cent2 = torch.matmul(-0.5 * (prior_latents**2).transpose(1, 2), prior_variances)\n # [batch_size, text_length, d] x [batch_size, d, latent_length] = [batch_size, text_length, latent_length]\n neg_cent3 = torch.matmul(prior_latents.transpose(1, 2), (prior_means * prior_variances))\n # [batch_size, 1, latent_length]\n neg_cent4 = torch.sum(-0.5 * (prior_means**2) * prior_variances, [1], keepdim=True)\n\n # [batch_size, text_length, latent_length]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n\n attn_mask = torch.unsqueeze(input_padding_mask, 2) * torch.unsqueeze(labels_padding_mask, -1)\n\n attn = monotonic_alignment_function(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()\n\n durations = attn.sum(2)\n\n if self.config.use_stochastic_duration_prediction:\n log_duration = self.duration_predictor(\n hidden_states, input_padding_mask, speaker_embeddings, durations=durations, reverse=False\n )\n log_duration = log_duration / torch.sum(input_padding_mask)\n else:\n log_duration_padded = torch.log(durations + 1e-6) * input_padding_mask\n log_duration = self.duration_predictor(hidden_states, input_padding_mask, speaker_embeddings)\n log_duration = torch.sum((log_duration - log_duration_padded) ** 2, [1, 2]) / torch.sum(input_padding_mask)\n\n # expand priors\n prior_means = torch.matmul(attn.squeeze(1), prior_means.transpose(1, 2)).transpose(1, 2)\n prior_log_variances = torch.matmul(attn.squeeze(1), prior_log_variances.transpose(1, 2)).transpose(1, 2)\n\n label_lengths = labels_attention_mask.sum(dim=1)\n latents_slice, ids_slice = rand_slice_segments(latents, label_lengths, segment_size=self.segment_size)\n\n waveform = self.decoder(latents_slice, speaker_embeddings)\n\n if not return_dict:\n outputs = (\n waveform,\n log_duration,\n attn,\n ids_slice,\n input_padding_mask,\n labels_padding_mask,\n latents,\n prior_latents,\n prior_means,\n prior_log_variances,\n posterior_means,\n posterior_log_variances,\n )\n return outputs\n\n return VitsTrainingOutput(\n waveform=waveform,\n log_duration=log_duration,\n attn=attn,\n ids_slice=ids_slice,\n input_padding_mask=input_padding_mask,\n labels_padding_mask=labels_padding_mask,\n latents=latents,\n prior_latents=prior_latents,\n prior_means=prior_means,\n prior_log_variances=prior_log_variances,\n posterior_means=posterior_means,\n posterior_log_variances=posterior_log_variances,\n )" } ]
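The `VitsFeatureExtractor` snippet above pads the waveform by `(n_fft - hop_length) / 2` on each side and runs `torch.stft` with a Hann window before applying the mel filter bank. Below is a rough shape check under its documented defaults (1024-point FFT, hop 256, 22050 Hz); the one-second random waveform is an assumption, and complex `.abs()` stands in for the snippet's sqrt-of-power form:

# Shape-checking sketch only, not the repo's code.
import torch

n_fft, hop_length, sampling_rate = 1024, 256, 22050
waveform = torch.randn(1, sampling_rate)                  # assumed: one second of mono audio
pad = (n_fft - hop_length) // 2
padded = torch.nn.functional.pad(waveform, (pad, pad), mode="reflect")

stft = torch.stft(padded, n_fft, hop_length=hop_length, win_length=n_fft,
                  window=torch.hann_window(n_fft), center=False,
                  onesided=True, return_complex=True)
magnitudes = stft.abs()                                   # (batch, n_fft // 2 + 1, frames)
print(magnitudes.shape)                                   # torch.Size([1, 513, 86])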
import argparse
import torch
from transformers.models.vits.modeling_vits import VitsModel
from transformers.models.vits.tokenization_vits import VitsTokenizer
from huggingface_hub import hf_hub_download
from utils.feature_extraction_vits import VitsFeatureExtractor
from utils.configuration_vits import VitsConfig, logging
from utils.modeling_vits_training import VitsDiscriminator, VitsModelForPreTraining
8,362
"""Convert VITS discriminator checkpoint and add it to an already converted VITS checkpoint.""" logging.set_verbosity_info() logger = logging.get_logger("transformers.models.vits") MAPPING = { "conv_post": "final_conv", } TOP_LEVEL_KEYS = [] IGNORE_KEYS = [] @torch.no_grad() def convert_checkpoint( language_code, pytorch_dump_folder_path, checkpoint_path=None, generator_checkpoint_path=None, repo_id=None, ): """ Copy/paste/tweak model's weights to transformers design. """ if language_code is not None: checkpoint_path = hf_hub_download(repo_id="facebook/mms-tts", subfolder=f"full_models/{language_code}", filename="D_100000.pth") generator_checkpoint_path = f"facebook/mms-tts-{language_code}" config = VitsConfig.from_pretrained(generator_checkpoint_path) generator = VitsModel.from_pretrained(generator_checkpoint_path) discriminator = VitsDiscriminator(config) for disc in discriminator.discriminators: disc.apply_weight_norm() checkpoint = torch.load(checkpoint_path, map_location=torch.device("cpu")) # load weights state_dict = checkpoint["model"] for k, v in list(state_dict.items()): for old_layer_name in MAPPING: new_k = k.replace(old_layer_name, MAPPING[old_layer_name]) state_dict[new_k] = state_dict.pop(k) extra_keys = set(state_dict.keys()) - set(discriminator.state_dict().keys()) extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")} missing_keys = set(discriminator.state_dict().keys()) - set(state_dict.keys()) missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")} if len(extra_keys) != 0: raise ValueError(f"extra keys found: {extra_keys}") if len(missing_keys) != 0: raise ValueError(f"missing keys: {missing_keys}") discriminator.load_state_dict(state_dict, strict=False) n_params = discriminator.num_parameters(exclude_embeddings=True) logger.info(f"model loaded: {round(n_params/1e6,1)}M params") for disc in discriminator.discriminators: disc.remove_weight_norm()
"""Convert VITS discriminator checkpoint and add it to an already converted VITS checkpoint.""" logging.set_verbosity_info() logger = logging.get_logger("transformers.models.vits") MAPPING = { "conv_post": "final_conv", } TOP_LEVEL_KEYS = [] IGNORE_KEYS = [] @torch.no_grad() def convert_checkpoint( language_code, pytorch_dump_folder_path, checkpoint_path=None, generator_checkpoint_path=None, repo_id=None, ): """ Copy/paste/tweak model's weights to transformers design. """ if language_code is not None: checkpoint_path = hf_hub_download(repo_id="facebook/mms-tts", subfolder=f"full_models/{language_code}", filename="D_100000.pth") generator_checkpoint_path = f"facebook/mms-tts-{language_code}" config = VitsConfig.from_pretrained(generator_checkpoint_path) generator = VitsModel.from_pretrained(generator_checkpoint_path) discriminator = VitsDiscriminator(config) for disc in discriminator.discriminators: disc.apply_weight_norm() checkpoint = torch.load(checkpoint_path, map_location=torch.device("cpu")) # load weights state_dict = checkpoint["model"] for k, v in list(state_dict.items()): for old_layer_name in MAPPING: new_k = k.replace(old_layer_name, MAPPING[old_layer_name]) state_dict[new_k] = state_dict.pop(k) extra_keys = set(state_dict.keys()) - set(discriminator.state_dict().keys()) extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")} missing_keys = set(discriminator.state_dict().keys()) - set(state_dict.keys()) missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")} if len(extra_keys) != 0: raise ValueError(f"extra keys found: {extra_keys}") if len(missing_keys) != 0: raise ValueError(f"missing keys: {missing_keys}") discriminator.load_state_dict(state_dict, strict=False) n_params = discriminator.num_parameters(exclude_embeddings=True) logger.info(f"model loaded: {round(n_params/1e6,1)}M params") for disc in discriminator.discriminators: disc.remove_weight_norm()
model = VitsModelForPreTraining(config)
3
2023-12-11 17:56:49+00:00
12k
youngskkim/CRN
exps/det/BEVDepth_r50_256x704_128x128_4key.py
[ { "identifier": "synchronize", "path": "utils/torch_dist.py", "snippet": "def synchronize():\n \"\"\"Helper function to synchronize (barrier)\n among all processes when using distributed training\"\"\"\n if not dist.is_available():\n return\n if not dist.is_initialized():\n return\n current_world_size = dist.get_world_size()\n if current_world_size == 1:\n return\n dist.barrier()" }, { "identifier": "run_cli", "path": "exps/base_cli.py", "snippet": "def run_cli(model_class=BEVDepthLightningModel,\n exp_name='base_exp',\n use_ema=False,\n ckpt_path=None):\n parent_parser = ArgumentParser(add_help=False)\n parent_parser = pl.Trainer.add_argparse_args(parent_parser)\n parent_parser.add_argument('-e',\n '--evaluate',\n dest='evaluate',\n action='store_true',\n help='evaluate model on validation set')\n parent_parser.add_argument('-p',\n '--predict',\n dest='predict',\n action='store_true',\n help='predict model on testing set')\n parent_parser.add_argument('-b', '--batch_size_per_device', type=int)\n parent_parser.add_argument('--seed',\n type=int,\n default=0,\n help='seed for initializing training.')\n parent_parser.add_argument('--ckpt_path', type=str)\n parser = BEVDepthLightningModel.add_model_specific_args(parent_parser)\n parser.set_defaults(profiler='simple',\n deterministic=False,\n max_epochs=24,\n strategy='ddp',\n # strategy='ddp_find_unused_parameters_false',\n num_sanity_val_steps=0,\n check_val_every_n_epoch=1,\n gradient_clip_val=5,\n limit_val_batches=0.25,\n log_every_n_steps=50,\n enable_checkpointing=True,\n precision=16,\n default_root_dir=os.path.join('./outputs/', exp_name))\n args = parser.parse_args()\n if args.seed is not None:\n pl.seed_everything(args.seed)\n\n model = model_class(**vars(args))\n if use_ema:\n train_dataloader = model.train_dataloader()\n ema_callback = EMACallback(\n len(train_dataloader.dataset) * args.max_epochs)\n trainer = pl.Trainer.from_argparse_args(args, callbacks=[ema_callback, ModelSummary(max_depth=3)])\n else:\n trainer = pl.Trainer.from_argparse_args(args, callbacks=[ModelSummary(max_depth=3)])\n if args.evaluate:\n trainer.test(model, ckpt_path=args.ckpt_path)\n elif args.predict:\n predict_step_outputs = trainer.predict(model, ckpt_path=args.ckpt_path)\n all_pred_results = list()\n all_img_metas = list()\n for predict_step_output in predict_step_outputs:\n for i in range(len(predict_step_output)):\n all_pred_results.append(predict_step_output[i][:3])\n all_img_metas.append(predict_step_output[i][3])\n synchronize()\n len_dataset = len(model.test_dataloader().dataset)\n all_pred_results = sum(\n map(list, zip(*all_gather_object(all_pred_results))),\n [])[:len_dataset]\n all_img_metas = sum(map(list, zip(*all_gather_object(all_img_metas))),\n [])[:len_dataset]\n model.evaluator._format_bbox(all_pred_results, all_img_metas,\n os.path.dirname(args.ckpt_path))\n else:\n if ckpt_path:\n trainer.fit(model, ckpt_path=ckpt_path)\n else:\n trainer.fit(model)" }, { "identifier": "BEVDepthLightningModel", "path": "exps/base_exp.py", "snippet": "class BEVDepthLightningModel(LightningModule):\n MODEL_NAMES = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith('__')\n and callable(models.__dict__[name]))\n\n def __init__(self,\n gpus: int = 1,\n data_root='data/nuScenes',\n eval_interval=1,\n batch_size_per_device=8,\n class_names=CLASSES,\n backbone_img_conf=backbone_img_conf,\n head_conf=head_conf,\n ida_aug_conf=ida_aug_conf,\n bda_aug_conf=bda_aug_conf,\n rda_aug_conf=rda_aug_conf,\n 
default_root_dir='./outputs/',\n **kwargs):\n super().__init__()\n self.save_hyperparameters()\n self.gpus = gpus\n self.optimizer_config = optimizer_config\n self.pretrain_config = pretrain_config\n self.eval_interval = eval_interval\n self.batch_size_per_device = batch_size_per_device\n self.data_root = data_root\n self.class_names = class_names\n self.backbone_img_conf = backbone_img_conf\n self.head_conf = head_conf\n self.ida_aug_conf = ida_aug_conf\n self.bda_aug_conf = bda_aug_conf\n self.rda_aug_conf = rda_aug_conf\n mmcv.mkdir_or_exist(default_root_dir)\n self.default_root_dir = default_root_dir\n self.evaluator = DetNuscEvaluator(class_names=self.class_names,\n output_dir=self.default_root_dir)\n self.model = BaseBEVDepth(self.backbone_img_conf,\n self.head_conf)\n self.mode = 'valid'\n self.img_conf = img_conf\n self.data_use_cbgs = False\n self.load_interval = 1\n self.num_sweeps = 1\n self.sweep_idxes = list()\n self.key_idxes = list()\n self.data_return_depth = True\n self.downsample_factor = self.backbone_img_conf['downsample_factor']\n self.dbound = self.backbone_img_conf['d_bound']\n self.depth_channels = int(\n (self.dbound[1] - self.dbound[0]) / self.dbound[2])\n self.use_fusion = False\n self.train_info_paths = 'data/nuScenes/nuscenes_infos_train.pkl'\n self.val_info_paths = 'data/nuScenes/nuscenes_infos_val.pkl'\n self.predict_info_paths = 'data/nuScenes/nuscenes_infos_test.pkl'\n\n self.return_image = True\n self.return_depth = True\n self.return_radar_pv = False\n\n self.remove_z_axis = True\n\n def forward(self, sweep_imgs, mats, is_train=False, **inputs):\n return self.model(sweep_imgs, mats, is_train=is_train)\n\n def training_step(self, batch):\n if self.global_rank == 0:\n for pg in self.trainer.optimizers[0].param_groups:\n self.log('learning_rate', pg[\"lr\"])\n\n (sweep_imgs, mats, _, gt_boxes_3d, gt_labels_3d, _, depth_labels, pts_pv) = batch\n if torch.cuda.is_available():\n if self.return_image:\n sweep_imgs = sweep_imgs.cuda()\n for key, value in mats.items():\n mats[key] = value.cuda()\n if self.return_radar_pv:\n pts_pv = pts_pv.cuda()\n gt_boxes_3d = [gt_box.cuda() for gt_box in gt_boxes_3d]\n gt_labels_3d = [gt_label.cuda() for gt_label in gt_labels_3d]\n preds, depth_preds = self(sweep_imgs, mats,\n pts_pv=pts_pv,\n is_train=True)\n targets = self.model.get_targets(gt_boxes_3d, gt_labels_3d)\n loss_detection, loss_heatmap, loss_bbox = self.model.loss(targets, preds)\n\n if len(depth_labels.shape) == 5:\n # only key-frame will calculate depth loss\n depth_labels = depth_labels[:, 0, ...].contiguous()\n loss_depth = self.get_depth_loss(depth_labels.cuda(), depth_preds)\n self.log('train/detection', loss_detection)\n self.log('train/heatmap', loss_heatmap)\n self.log('train/bbox', loss_bbox)\n self.log('train/depth', loss_depth)\n\n return loss_detection + loss_depth\n\n def get_depth_loss(self, depth_labels, depth_preds, weight=3.):\n depth_labels = self.get_downsampled_gt_depth(depth_labels)\n depth_preds = depth_preds.permute(0, 2, 3, 1).contiguous().view(\n -1, self.depth_channels)\n fg_mask = torch.max(depth_labels, dim=1).values > 0.0\n\n with autocast(enabled=False):\n loss_depth = (F.binary_cross_entropy(\n depth_preds[fg_mask],\n depth_labels[fg_mask],\n reduction='none',\n ).sum() / max(1.0, fg_mask.sum()))\n\n return weight * loss_depth\n\n def get_downsampled_gt_depth(self, gt_depths):\n \"\"\"\n Input:\n gt_depths: [B, N, H, W]\n Output:\n gt_depths: [B*N*h*w, d]\n \"\"\"\n B, N, H, W = gt_depths.shape\n gt_depths = gt_depths.view(\n B * 
N,\n H // self.downsample_factor,\n self.downsample_factor,\n W // self.downsample_factor,\n self.downsample_factor,\n 1,\n )\n gt_depths = gt_depths.permute(0, 1, 3, 5, 2, 4).contiguous()\n gt_depths = gt_depths.view(\n -1, self.downsample_factor * self.downsample_factor)\n gt_depths_tmp = torch.where(gt_depths == 0.0,\n 1e5 * torch.ones_like(gt_depths),\n gt_depths)\n gt_depths = torch.min(gt_depths_tmp, dim=-1).values\n gt_depths = gt_depths.view(B * N, H // self.downsample_factor,\n W // self.downsample_factor)\n\n gt_depths = (gt_depths -\n (self.dbound[0] - self.dbound[2])) / self.dbound[2]\n gt_depths = torch.where(\n (gt_depths < self.depth_channels + 1) & (gt_depths > 0.),\n gt_depths, torch.zeros_like(gt_depths))\n gt_depths = F.one_hot(gt_depths.long(),\n num_classes=self.depth_channels + 1).view(\n -1, self.depth_channels + 1)[:, 1:]\n return gt_depths.float()\n\n def eval_step(self, batch, batch_idx, prefix: str):\n (sweep_imgs, mats, img_metas, _, _, _, _, pts_pv) = batch\n if torch.cuda.is_available():\n if self.return_image:\n sweep_imgs = sweep_imgs.cuda()\n for key, value in mats.items():\n mats[key] = value.cuda()\n if self.return_radar_pv:\n pts_pv = pts_pv.cuda()\n preds = self(sweep_imgs, mats,\n pts_pv=pts_pv,\n is_train=False)\n if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):\n results = self.model.module.get_bboxes(preds, img_metas)\n else:\n results = self.model.get_bboxes(preds, img_metas)\n for i in range(len(results)):\n results[i][0] = results[i][0].tensor.detach().cpu().numpy()\n results[i][1] = results[i][1].detach().cpu().numpy()\n results[i][2] = results[i][2].detach().cpu().numpy()\n results[i].append(img_metas[i])\n return results\n\n def validation_epoch_end(self, validation_step_outputs):\n detection_losses = list()\n heatmap_losses = list()\n bbox_losses = list()\n depth_losses = list()\n for validation_step_output in validation_step_outputs:\n detection_losses.append(validation_step_output[0])\n heatmap_losses.append(validation_step_output[1])\n bbox_losses.append(validation_step_output[2])\n depth_losses.append(validation_step_output[3])\n synchronize()\n\n self.log('val/detection', torch.mean(torch.stack(detection_losses)), on_epoch=True)\n self.log('val/heatmap', torch.mean(torch.stack(heatmap_losses)), on_epoch=True)\n self.log('val/bbox', torch.mean(torch.stack(bbox_losses)), on_epoch=True)\n self.log('val/depth', torch.mean(torch.stack(depth_losses)), on_epoch=True)\n\n def validation_step(self, batch, batch_idx):\n (sweep_imgs, mats, _, gt_boxes_3d, gt_labels_3d, _, depth_labels, pts_pv) = batch\n if torch.cuda.is_available():\n if self.return_image:\n sweep_imgs = sweep_imgs.cuda()\n for key, value in mats.items():\n mats[key] = value.cuda()\n if self.return_radar_pv:\n pts_pv = pts_pv.cuda()\n gt_boxes_3d = [gt_box.cuda() for gt_box in gt_boxes_3d]\n gt_labels_3d = [gt_label.cuda() for gt_label in gt_labels_3d]\n with torch.no_grad():\n preds, depth_preds = self(sweep_imgs, mats,\n pts_pv=pts_pv,\n is_train=True)\n targets = self.model.get_targets(gt_boxes_3d, gt_labels_3d)\n loss_detection, loss_heatmap, loss_bbox = self.model.loss(targets, preds)\n\n if len(depth_labels.shape) == 5:\n # only key-frame will calculate depth loss\n depth_labels = depth_labels[:, 0, ...].contiguous()\n loss_depth = self.get_depth_loss(depth_labels.cuda(), depth_preds, weight=3.)\n return loss_detection, loss_heatmap, loss_bbox, loss_depth\n\n def test_epoch_end(self, test_step_outputs):\n all_pred_results = list()\n all_img_metas = 
list()\n for test_step_output in test_step_outputs:\n for i in range(len(test_step_output)):\n all_pred_results.append(test_step_output[i][:3])\n all_img_metas.append(test_step_output[i][3])\n synchronize()\n # TODO: Change another way.\n dataset_length = len(self.val_dataloader().dataset)\n all_pred_results = sum(\n map(list, zip(*all_gather_object(all_pred_results))),\n [])[:dataset_length]\n all_img_metas = sum(map(list, zip(*all_gather_object(all_img_metas))),\n [])[:dataset_length]\n if self.global_rank == 0:\n self.evaluator.evaluate(all_pred_results, all_img_metas)\n\n def configure_optimizers(self):\n optimizer = build_optimizer(self.model, self.optimizer_config)\n scheduler = MultiStepLR(optimizer, [19, 23])\n return [[optimizer], [scheduler]]\n\n def train_dataloader(self):\n train_dataset = NuscDatasetRadarDet(\n ida_aug_conf=self.ida_aug_conf,\n bda_aug_conf=self.bda_aug_conf,\n rda_aug_conf=self.rda_aug_conf,\n img_backbone_conf=self.backbone_img_conf,\n classes=self.class_names,\n data_root=self.data_root,\n info_paths=self.train_info_paths,\n is_train=True,\n use_cbgs=self.data_use_cbgs,\n img_conf=self.img_conf,\n load_interval=self.load_interval,\n num_sweeps=self.num_sweeps,\n sweep_idxes=self.sweep_idxes,\n key_idxes=self.key_idxes,\n return_image=self.return_image,\n return_depth=self.return_depth,\n return_radar_pv=self.return_radar_pv,\n remove_z_axis=self.remove_z_axis,\n depth_path='depth_gt',\n radar_pv_path='radar_pv_filter'\n )\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=self.batch_size_per_device,\n num_workers=4,\n drop_last=True,\n shuffle=False,\n collate_fn=partial(collate_fn,\n is_return_image=self.return_image,\n is_return_depth=self.return_depth,\n is_return_radar_pv=self.return_radar_pv),\n sampler=None,\n )\n return train_loader\n\n def val_dataloader(self):\n val_dataset = NuscDatasetRadarDet(\n ida_aug_conf=self.ida_aug_conf,\n bda_aug_conf=self.bda_aug_conf,\n rda_aug_conf=self.rda_aug_conf,\n img_backbone_conf=self.backbone_img_conf,\n classes=self.class_names,\n data_root=self.data_root,\n info_paths=self.val_info_paths,\n is_train=False,\n img_conf=self.img_conf,\n load_interval=self.load_interval,\n num_sweeps=self.num_sweeps,\n sweep_idxes=self.sweep_idxes,\n key_idxes=self.key_idxes,\n return_image=self.return_image,\n return_depth=self.return_depth,\n return_radar_pv=self.return_radar_pv,\n remove_z_axis=self.remove_z_axis,\n radar_pv_path='radar_pv_filter',\n )\n val_loader = torch.utils.data.DataLoader(\n val_dataset,\n batch_size=self.batch_size_per_device,\n num_workers=4,\n shuffle=False,\n collate_fn=partial(collate_fn,\n is_return_image=self.return_image,\n is_return_depth=self.return_depth,\n is_return_radar_pv=self.return_radar_pv),\n sampler=None,\n )\n return val_loader\n\n def test_dataloader(self):\n return self.val_dataloader()\n\n def predict_dataloader(self):\n predict_dataset = NuscDatasetRadarDet(\n ida_aug_conf=self.ida_aug_conf,\n bda_aug_conf=self.bda_aug_conf,\n rda_aug_conf=self.rda_aug_conf,\n img_backbone_conf=self.backbone_img_conf,\n classes=self.class_names,\n data_root=self.data_root,\n info_paths=self.val_info_paths,\n is_train=False,\n img_conf=self.img_conf,\n load_interval=self.load_interval,\n num_sweeps=self.num_sweeps,\n sweep_idxes=self.sweep_idxes,\n key_idxes=self.key_idxes,\n return_image=self.return_image,\n return_depth=self.return_depth,\n return_radar_pv=self.return_radar_pv,\n remove_z_axis=self.remove_z_axis,\n radar_pv_path='radar_pv_filter',\n )\n predict_loader 
= torch.utils.data.DataLoader(\n predict_dataset,\n batch_size=self.batch_size_per_device,\n num_workers=4,\n shuffle=False,\n collate_fn=partial(collate_fn,\n is_return_image=self.return_image,\n is_return_depth=self.return_depth,\n is_return_radar_pv=self.return_radar_pv),\n sampler=None,\n )\n return predict_loader\n\n def test_step(self, batch, batch_idx):\n return self.eval_step(batch, batch_idx, 'test')\n\n def predict_step(self, batch, batch_idx):\n return self.eval_step(batch, batch_idx, 'predict')\n\n @staticmethod\n def add_model_specific_args(parent_parser): # pragma: no-cover\n return parent_parser" }, { "identifier": "BaseBEVDepth", "path": "models/base_bev_depth.py", "snippet": "class BaseBEVDepth(nn.Module):\n \"\"\"Source code of `BEVDepth`, `https://arxiv.org/abs/2112.11790`.\n\n Args:\n backbone_conf (dict): Config of backbone.\n head_conf (dict): Config of head.\n \"\"\"\n\n def __init__(self, backbone_conf, head_conf):\n super(BaseBEVDepth, self).__init__()\n self.backbone_img = BaseLSSFPN(**backbone_conf)\n self.head = BEVDepthHead(**head_conf)\n\n # for inference time measurement\n self.idx = 0\n self.times_dict = {\n 'img': [],\n 'img_backbone': [],\n 'img_dep': [],\n 'img_transform': [],\n 'img_pool': [],\n\n 'head': [],\n 'head_backbone': [],\n 'head_head': [],\n }\n\n def forward(self,\n sweep_imgs,\n mats_dict,\n is_train=False\n ):\n \"\"\"Forward function for BEVDepth\n\n Args:\n sweep_imgs (Tensor): Input images.\n mats_dict(dict):\n sensor2ego_mats(Tensor): Transformation matrix from\n camera to ego with shape of (B, num_sweeps,\n num_cameras, 4, 4).\n intrin_mats(Tensor): Intrinsic matrix with shape\n of (B, num_sweeps, num_cameras, 4, 4).\n ida_mats(Tensor): Transformation matrix for ida with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n sensor2sensor_mats(Tensor): Transformation matrix\n from key frame camera to sweep frame camera with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n bda_mat(Tensor): Rotation matrix for bda with shape\n of (B, 4, 4).\n\n Returns:\n tuple(list[dict]): Output results for tasks.\n \"\"\"\n if is_train:\n self.time = None\n\n x, depth, _ = self.backbone_img(sweep_imgs, mats_dict,\n is_return_depth=True)\n preds, _ = self.head(x)\n return preds, depth\n else:\n if self.idx < 100: # skip few iterations for warmup\n self.times = None\n elif self.idx == 100:\n self.times = self.times_dict\n\n x, self.times = self.backbone_img(sweep_imgs, mats_dict,\n times=self.times)\n preds, self.times = self.head(x, times=self.times)\n\n if self.idx == 1000:\n time_mean = {}\n for k, v in self.times.items():\n time_mean[k] = sum(v) / len(v)\n print('img: %.2f' % time_mean['img'])\n print(' img_backbone: %.2f' % time_mean['img_backbone'])\n print(' img_dep: %.2f' % time_mean['img_dep'])\n print(' img_transform: %.2f' % time_mean['img_transform'])\n print(' img_pool: %.2f' % time_mean['img_pool'])\n print('head: %.2f' % time_mean['head'])\n print(' head_backbone: %.2f' % time_mean['head_backbone'])\n print(' head_head: %.2f' % time_mean['head_head'])\n total = time_mean['img'] + time_mean['head']\n print('total: %.2f' % total)\n print(' ')\n print('FPS: %.2f' % (1000/total))\n\n self.idx += 1\n return preds\n\n def get_targets(self, gt_boxes, gt_labels):\n \"\"\"Generate training targets for a single sample.\n\n Args:\n gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): Ground truth gt boxes.\n gt_labels_3d (torch.Tensor): Labels of boxes.\n\n Returns:\n tuple[list[torch.Tensor]]: Tuple of target including \\\n the following results in order.\n\n - 
list[torch.Tensor]: Heatmap scores.\n - list[torch.Tensor]: Ground truth boxes.\n - list[torch.Tensor]: Indexes indicating the position \\\n of the valid boxes.\n - list[torch.Tensor]: Masks indicating which boxes \\\n are valid.\n \"\"\"\n return self.head.get_targets(gt_boxes, gt_labels)\n\n def loss(self, targets, preds_dicts):\n \"\"\"Loss function for BEVDepth.\n\n Args:\n gt_bboxes_3d (list[:obj:`LiDARInstance3DBoxes`]): Ground\n truth gt boxes.\n gt_labels_3d (list[torch.Tensor]): Labels of boxes.\n preds_dicts (dict): Output of forward function.\n\n Returns:\n dict[str:torch.Tensor]: Loss of heatmap and bbox of each task.\n \"\"\"\n return self.head.loss(targets, preds_dicts)\n\n def get_bboxes(self, preds_dicts, img_metas=None, img=None, rescale=False):\n \"\"\"Generate bboxes from bbox head predictions.\n\n Args:\n preds_dicts (tuple[list[dict]]): Prediction results.\n img_metas (list[dict]): Point cloud and image's meta info.\n\n Returns:\n list[dict]: Decoded bbox, scores and labels after nms.\n \"\"\"\n return self.head.get_bboxes(preds_dicts, img_metas, img, rescale)" } ]
import torch
from utils.torch_dist import synchronize
from exps.base_cli import run_cli
from exps.base_exp import BEVDepthLightningModel as BaseBEVDepthLightningModel
from models.base_bev_depth import BaseBEVDepth
8,503
depth=18, num_stages=3, strides=(1, 2, 2), dilations=(1, 1, 1), out_indices=[0, 1, 2], norm_eval=False, base_channels=160, ), 'bev_neck_conf': dict( type='SECONDFPN', in_channels=[80, 160, 320, 640], upsample_strides=[1, 2, 4, 8], out_channels=[64, 64, 64, 64] ), 'tasks': [ dict(num_class=1, class_names=['car']), dict(num_class=2, class_names=['truck', 'construction_vehicle']), dict(num_class=2, class_names=['bus', 'trailer']), dict(num_class=1, class_names=['barrier']), dict(num_class=2, class_names=['motorcycle', 'bicycle']), dict(num_class=2, class_names=['pedestrian', 'traffic_cone']), ], 'common_heads': dict( reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)), 'bbox_coder': dict( type='CenterPointBBoxCoder', post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], max_num=500, score_threshold=0.01, out_size_factor=4, voxel_size=[0.2, 0.2, 8], pc_range=[-51.2, -51.2, -5, 51.2, 51.2, 3], code_size=9, ), 'train_cfg': dict( point_cloud_range=[-51.2, -51.2, -5, 51.2, 51.2, 3], grid_size=[512, 512, 1], voxel_size=[0.2, 0.2, 8], out_size_factor=4, dense_reg=1, gaussian_overlap=0.1, max_objs=500, min_radius=2, code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], ), 'test_cfg': dict( post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], max_per_img=500, max_pool_nms=False, min_radius=[4, 12, 10, 1, 0.85, 0.175], score_threshold=0.01, out_size_factor=4, voxel_size=[0.2, 0.2, 8], nms_type='circle', pre_max_size=1000, post_max_size=200, nms_thr=0.2, ), 'in_channels': 256, # Equal to bev_neck output_channels. 'loss_cls': dict(type='GaussianFocalLoss', reduction='mean'), 'loss_bbox': dict(type='L1Loss', reduction='mean', loss_weight=0.25), 'gaussian_overlap': 0.1, 'min_radius': 2, } ################################################ self.key_idxes = [-2, -4, -6] self.head_conf['bev_backbone_conf']['in_channels'] = 80 * ( len(self.key_idxes) + 1) self.head_conf['bev_neck_conf']['in_channels'] = [ 80 * (len(self.key_idxes) + 1), 160, 320, 640 ] self.dbound = self.backbone_img_conf['d_bound'] self.depth_channels = int( (self.dbound[1] - self.dbound[0]) / self.dbound[2]) self.model = BaseBEVDepth(self.backbone_img_conf, self.head_conf) def forward(self, sweep_imgs, mats, is_train=False, **inputs): return self.model(sweep_imgs, mats, is_train=is_train) def training_step(self, batch): if self.global_rank == 0: for pg in self.trainer.optimizers[0].param_groups: self.log('learning_rate', pg["lr"]) (sweep_imgs, mats, _, gt_boxes_3d, gt_labels_3d, _, depth_labels, _) = batch if torch.cuda.is_available(): if self.return_image: sweep_imgs = sweep_imgs.cuda() for key, value in mats.items(): mats[key] = value.cuda() gt_boxes_3d = [gt_box.cuda() for gt_box in gt_boxes_3d] gt_labels_3d = [gt_label.cuda() for gt_label in gt_labels_3d] preds, depth_preds = self(sweep_imgs, mats, is_train=True) targets = self.model.get_targets(gt_boxes_3d, gt_labels_3d) loss_detection, loss_heatmap, loss_bbox = self.model.loss(targets, preds) if len(depth_labels.shape) == 5: # only key-frame will calculate depth loss depth_labels = depth_labels[:, 0, ...].contiguous() loss_depth = self.get_depth_loss(depth_labels.cuda(), depth_preds, weight=3.) 
self.log('train/detection', loss_detection) self.log('train/heatmap', loss_heatmap) self.log('train/bbox', loss_bbox) self.log('train/depth', loss_depth) return loss_detection + loss_depth def validation_epoch_end(self, validation_step_outputs): detection_losses = list() heatmap_losses = list() bbox_losses = list() depth_losses = list() for validation_step_output in validation_step_outputs: detection_losses.append(validation_step_output[0]) heatmap_losses.append(validation_step_output[1]) bbox_losses.append(validation_step_output[2]) depth_losses.append(validation_step_output[3])
# Copyright (c) Megvii Inc. All rights reserved. """ mAP: 0.3672 mATE: 0.6827 mASE: 0.2833 mAOE: 0.5354 mAVE: 0.4156 mAAE: 0.2066 NDS: 0.4712 Eval time: 199.7s Per-class results: Object Class AP ATE ASE AOE AVE AAE car 0.540 0.488 0.165 0.153 0.493 0.216 truck 0.302 0.707 0.225 0.182 0.380 0.202 bus 0.387 0.722 0.224 0.121 0.755 0.302 trailer 0.176 1.071 0.255 0.516 0.268 0.086 construction_vehicle 0.103 1.061 0.522 1.298 0.127 0.353 pedestrian 0.310 0.745 0.290 0.829 0.465 0.253 motorcycle 0.390 0.624 0.257 0.691 0.654 0.232 bicycle 0.379 0.494 0.268 0.828 0.183 0.009 traffic_cone 0.516 0.487 0.347 nan nan nan barrier 0.568 0.426 0.280 0.202 nan nan img: 24.63 img_backbone: 11.21 img_dep: 6.67 img_transform: 5.11 img_pool: 0.99 head: 9.04 head_backbone: 3.10 head_head: 5.94 total: 33.68 FPS: 29.70 | Name | Type | Params ----------------------------------------------------------------------- 0 | model | BaseBEVDepth | 77.6 M 1 | model.backbone_img | BaseLSSFPN | 53.3 M 2 | model.backbone_img.img_backbone | ResNet | 23.5 M 3 | model.backbone_img.img_neck | SECONDFPN | 2.0 M 4 | model.backbone_img.depth_net | DepthNet | 27.8 M 5 | model.head | BEVDepthHead | 24.4 M 6 | model.head.loss_cls | GaussianFocalLoss | 0 7 | model.head.loss_bbox | L1Loss | 0 8 | model.head.shared_conv | ConvModule | 147 K 9 | model.head.task_heads | ModuleList | 1.4 M 10 | model.head.trunk | ResNet | 19.8 M 11 | model.head.neck | SECONDFPN | 3.0 M ----------------------------------------------------------------------- """ class BEVDepthLightningModel(BaseBEVDepthLightningModel): def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self.return_image = True self.return_depth = True self.return_radar_pv = False ################################################ self.optimizer_config = dict( type='AdamW', lr=2e-4, weight_decay=1e-4) ################################################ self.ida_aug_conf = { 'resize_lim': (0.386, 0.55), 'final_dim': (256, 704), 'rot_lim': (-5.4, 5.4), 'H': 900, 'W': 1600, 'rand_flip': True, 'bot_pct_lim': (0.0, 0.0), 'cams': [ 'CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT' ], 'Ncams': 6, } self.bda_aug_conf = { 'rot_ratio': 1.0, 'rot_lim': (-22.5, 22.5), 'scale_lim': (0.95, 1.05), 'flip_dx_ratio': 0.5, 'flip_dy_ratio': 0.5 } ################################################ self.backbone_img_conf = { 'x_bound': [-51.2, 51.2, 0.8], 'y_bound': [-51.2, 51.2, 0.8], 'z_bound': [-5, 3, 8], 'd_bound': [2.0, 58.0, 0.5], 'final_dim': (256, 704), 'downsample_factor': 16, 'img_backbone_conf': dict( type='ResNet', depth=50, frozen_stages=0, out_indices=[0, 1, 2, 3], norm_eval=False, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), ), 'img_neck_conf': dict( type='SECONDFPN', in_channels=[256, 512, 1024, 2048], upsample_strides=[0.25, 0.5, 1, 2], out_channels=[128, 128, 128, 128], ), 'depth_net_conf': dict(in_channels=512, mid_channels=512), 'camera_aware': True, 'output_channels': 80, } ################################################ self.head_conf = { 'bev_backbone_conf': dict( type='ResNet', in_channels=128, depth=18, num_stages=3, strides=(1, 2, 2), dilations=(1, 1, 1), out_indices=[0, 1, 2], norm_eval=False, base_channels=160, ), 'bev_neck_conf': dict( type='SECONDFPN', in_channels=[80, 160, 320, 640], upsample_strides=[1, 2, 4, 8], out_channels=[64, 64, 64, 64] ), 'tasks': [ dict(num_class=1, class_names=['car']), dict(num_class=2, class_names=['truck', 'construction_vehicle']), dict(num_class=2, 
class_names=['bus', 'trailer']), dict(num_class=1, class_names=['barrier']), dict(num_class=2, class_names=['motorcycle', 'bicycle']), dict(num_class=2, class_names=['pedestrian', 'traffic_cone']), ], 'common_heads': dict( reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)), 'bbox_coder': dict( type='CenterPointBBoxCoder', post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], max_num=500, score_threshold=0.01, out_size_factor=4, voxel_size=[0.2, 0.2, 8], pc_range=[-51.2, -51.2, -5, 51.2, 51.2, 3], code_size=9, ), 'train_cfg': dict( point_cloud_range=[-51.2, -51.2, -5, 51.2, 51.2, 3], grid_size=[512, 512, 1], voxel_size=[0.2, 0.2, 8], out_size_factor=4, dense_reg=1, gaussian_overlap=0.1, max_objs=500, min_radius=2, code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], ), 'test_cfg': dict( post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], max_per_img=500, max_pool_nms=False, min_radius=[4, 12, 10, 1, 0.85, 0.175], score_threshold=0.01, out_size_factor=4, voxel_size=[0.2, 0.2, 8], nms_type='circle', pre_max_size=1000, post_max_size=200, nms_thr=0.2, ), 'in_channels': 256, # Equal to bev_neck output_channels. 'loss_cls': dict(type='GaussianFocalLoss', reduction='mean'), 'loss_bbox': dict(type='L1Loss', reduction='mean', loss_weight=0.25), 'gaussian_overlap': 0.1, 'min_radius': 2, } ################################################ self.key_idxes = [-2, -4, -6] self.head_conf['bev_backbone_conf']['in_channels'] = 80 * ( len(self.key_idxes) + 1) self.head_conf['bev_neck_conf']['in_channels'] = [ 80 * (len(self.key_idxes) + 1), 160, 320, 640 ] self.dbound = self.backbone_img_conf['d_bound'] self.depth_channels = int( (self.dbound[1] - self.dbound[0]) / self.dbound[2]) self.model = BaseBEVDepth(self.backbone_img_conf, self.head_conf) def forward(self, sweep_imgs, mats, is_train=False, **inputs): return self.model(sweep_imgs, mats, is_train=is_train) def training_step(self, batch): if self.global_rank == 0: for pg in self.trainer.optimizers[0].param_groups: self.log('learning_rate', pg["lr"]) (sweep_imgs, mats, _, gt_boxes_3d, gt_labels_3d, _, depth_labels, _) = batch if torch.cuda.is_available(): if self.return_image: sweep_imgs = sweep_imgs.cuda() for key, value in mats.items(): mats[key] = value.cuda() gt_boxes_3d = [gt_box.cuda() for gt_box in gt_boxes_3d] gt_labels_3d = [gt_label.cuda() for gt_label in gt_labels_3d] preds, depth_preds = self(sweep_imgs, mats, is_train=True) targets = self.model.get_targets(gt_boxes_3d, gt_labels_3d) loss_detection, loss_heatmap, loss_bbox = self.model.loss(targets, preds) if len(depth_labels.shape) == 5: # only key-frame will calculate depth loss depth_labels = depth_labels[:, 0, ...].contiguous() loss_depth = self.get_depth_loss(depth_labels.cuda(), depth_preds, weight=3.) self.log('train/detection', loss_detection) self.log('train/heatmap', loss_heatmap) self.log('train/bbox', loss_bbox) self.log('train/depth', loss_depth) return loss_detection + loss_depth def validation_epoch_end(self, validation_step_outputs): detection_losses = list() heatmap_losses = list() bbox_losses = list() depth_losses = list() for validation_step_output in validation_step_outputs: detection_losses.append(validation_step_output[0]) heatmap_losses.append(validation_step_output[1]) bbox_losses.append(validation_step_output[2]) depth_losses.append(validation_step_output[3])
synchronize()
0
2023-12-06 14:57:49+00:00
12k
LIU-Yuxin/SyncMVD
src/pipeline.py
[ { "identifier": "UVProjection", "path": "src/renderer/project.py", "snippet": "class UVProjection():\n\tdef __init__(self, texture_size=96, render_size=64, sampling_mode=\"nearest\", channels=3, device=None):\n\t\tself.channels = channels\n\t\tself.device = device or torch.device(\"cpu\")\n\t\tself.lights = AmbientLights(ambient_color=((1.0,)*channels,), device=self.device)\n\t\tself.target_size = (texture_size,texture_size)\n\t\tself.render_size = render_size\n\t\tself.sampling_mode = sampling_mode\n\n\n\t# Load obj mesh, rescale the mesh to fit into the bounding box\n\tdef load_mesh(self, mesh_path, scale_factor=2.0, auto_center=True, autouv=False):\n\t\tmesh = load_objs_as_meshes([mesh_path], device=self.device)\n\t\tif auto_center:\n\t\t\tverts = mesh.verts_packed()\n\t\t\tmax_bb = (verts - 0).max(0)[0]\n\t\t\tmin_bb = (verts - 0).min(0)[0]\n\t\t\tscale = (max_bb - min_bb).max()/2\n\t\t\tcenter = (max_bb+min_bb) /2\n\t\t\tmesh.offset_verts_(-center)\n\t\t\tmesh.scale_verts_((scale_factor / float(scale)))\t\t\n\t\telse:\n\t\t\tmesh.scale_verts_((scale_factor))\n\n\t\tif autouv or (mesh.textures is None):\n\t\t\tmesh = self.uv_unwrap(mesh)\n\t\tself.mesh = mesh\n\n\n\tdef load_glb_mesh(self, mesh_path, scale_factor=2.0, auto_center=True, autouv=False):\n\t\tfrom pytorch3d.io.experimental_gltf_io import MeshGlbFormat\n\t\tio = IO()\n\t\tio.register_meshes_format(MeshGlbFormat())\n\t\twith open(mesh_path, \"rb\") as f:\n\t\t\tmesh = io.load_mesh(f, include_textures=True, device=self.device)\n\t\tif auto_center:\n\t\t\tverts = mesh.verts_packed()\n\t\t\tmax_bb = (verts - 0).max(0)[0]\n\t\t\tmin_bb = (verts - 0).min(0)[0]\n\t\t\tscale = (max_bb - min_bb).max()/2 \n\t\t\tcenter = (max_bb+min_bb) /2\n\t\t\tmesh.offset_verts_(-center)\n\t\t\tmesh.scale_verts_((scale_factor / float(scale)))\n\t\telse:\n\t\t\tmesh.scale_verts_((scale_factor))\n\t\tif autouv or (mesh.textures is None):\n\t\t\tmesh = self.uv_unwrap(mesh)\n\t\tself.mesh = mesh\n\n\n\t# Save obj mesh\n\tdef save_mesh(self, mesh_path, texture):\n\t\tsave_obj(mesh_path, \n\t\t\t\tself.mesh.verts_list()[0],\n\t\t\t\tself.mesh.faces_list()[0],\n\t\t\t\tverts_uvs= self.mesh.textures.verts_uvs_list()[0],\n\t\t\t\tfaces_uvs= self.mesh.textures.faces_uvs_list()[0],\n\t\t\t\ttexture_map=texture)\n\n\t# Code referred to TEXTure code (https://github.com/TEXTurePaper/TEXTurePaper.git)\n\tdef uv_unwrap(self, mesh):\n\t\tverts_list = mesh.verts_list()[0]\n\t\tfaces_list = mesh.faces_list()[0]\n\n\n\t\timport xatlas\n\t\timport numpy as np\n\t\tv_np = verts_list.cpu().numpy()\n\t\tf_np = faces_list.int().cpu().numpy()\n\t\tatlas = xatlas.Atlas()\n\t\tatlas.add_mesh(v_np, f_np)\n\t\tchart_options = xatlas.ChartOptions()\n\t\tchart_options.max_iterations = 4\n\t\tatlas.generate(chart_options=chart_options)\n\t\tvmapping, ft_np, vt_np = atlas[0] # [N], [M, 3], [N, 2]\n\n\t\tvt = torch.from_numpy(vt_np.astype(np.float32)).type(verts_list.dtype).to(mesh.device)\n\t\tft = torch.from_numpy(ft_np.astype(np.int64)).type(faces_list.dtype).to(mesh.device)\n\n\t\tnew_map = torch.zeros(self.target_size+(self.channels,), device=mesh.device)\n\t\tnew_tex = TexturesUV(\n\t\t\t[new_map], \n\t\t\t[ft], \n\t\t\t[vt], \n\t\t\tsampling_mode=self.sampling_mode\n\t\t\t)\n\n\t\tmesh.textures = new_tex\n\t\treturn mesh\n\n\n\t'''\n\t\tA functions that disconnect faces in the mesh according to\n\t\tits UV seams. 
The number of vertices are made equal to the\n\t\tnumber of unique vertices its UV layout, while the faces list\n\t\tis intact.\n\t'''\n\tdef disconnect_faces(self):\n\t\tmesh = self.mesh\n\t\tverts_list = mesh.verts_list()\n\t\tfaces_list = mesh.faces_list()\n\t\tverts_uvs_list = mesh.textures.verts_uvs_list()\n\t\tfaces_uvs_list = mesh.textures.faces_uvs_list()\n\t\tpacked_list = [v[f] for v,f in zip(verts_list, faces_list)]\n\t\tverts_disconnect_list = [\n\t\t\ttorch.zeros(\n\t\t\t\t(verts_uvs_list[i].shape[0], 3), \n\t\t\t\tdtype=verts_list[0].dtype, \n\t\t\t\tdevice=verts_list[0].device\n\t\t\t) \n\t\t\tfor i in range(len(verts_list))]\n\t\tfor i in range(len(verts_list)):\n\t\t\tverts_disconnect_list[i][faces_uvs_list] = packed_list[i]\n\t\tassert not mesh.has_verts_normals(), \"Not implemented for vertex normals\"\n\t\tself.mesh_d = Meshes(verts_disconnect_list, faces_uvs_list, mesh.textures)\n\t\treturn self.mesh_d\n\n\n\t'''\n\t\tA function that construct a temp mesh for back-projection.\n\t\tTake a disconnected mesh and a rasterizer, the function calculates\n\t\tthe projected faces as the UV, as use its original UV with pseudo\n\t\tz value as world space geometry.\n\t'''\n\tdef construct_uv_mesh(self):\n\t\tmesh = self.mesh_d\n\t\tverts_list = mesh.verts_list()\n\t\tverts_uvs_list = mesh.textures.verts_uvs_list()\n\t\t# faces_list = [torch.flip(faces, [-1]) for faces in mesh.faces_list()]\n\t\tnew_verts_list = []\n\t\tfor i, (verts, verts_uv) in enumerate(zip(verts_list, verts_uvs_list)):\n\t\t\tverts = verts.clone()\n\t\t\tverts_uv = verts_uv.clone()\n\t\t\tverts[...,0:2] = verts_uv[...,:]\n\t\t\tverts = (verts - 0.5) * 2\n\t\t\tverts[...,2] *= 1\n\t\t\tnew_verts_list.append(verts)\n\t\ttextures_uv = mesh.textures.clone()\n\t\tself.mesh_uv = Meshes(new_verts_list, mesh.faces_list(), textures_uv)\n\t\treturn self.mesh_uv\n\n\n\t# Set texture for the current mesh.\n\tdef set_texture_map(self, texture):\n\t\tnew_map = texture.permute(1, 2, 0)\n\t\tnew_map = new_map.to(self.device)\n\t\tnew_tex = TexturesUV(\n\t\t\t[new_map], \n\t\t\tself.mesh.textures.faces_uvs_padded(), \n\t\t\tself.mesh.textures.verts_uvs_padded(), \n\t\t\tsampling_mode=self.sampling_mode\n\t\t\t)\n\t\tself.mesh.textures = new_tex\n\n\n\t# Set the initial normal noise texture\n\t# No generator here for replication of the experiment result. 
Add one as you wish\n\tdef set_noise_texture(self, channels=None):\n\t\tif not channels:\n\t\t\tchannels = self.channels\n\t\tnoise_texture = torch.normal(0, 1, (channels,) + self.target_size, device=self.device)\n\t\tself.set_texture_map(noise_texture)\n\t\treturn noise_texture\n\n\n\t# Set the cameras given the camera poses and centers\n\tdef set_cameras(self, camera_poses, centers=None, camera_distance=2.7, scale=None):\n\t\telev = torch.FloatTensor([pose[0] for pose in camera_poses])\n\t\tazim = torch.FloatTensor([pose[1] for pose in camera_poses])\n\t\tR, T = look_at_view_transform(dist=camera_distance, elev=elev, azim=azim, at=centers or ((0,0,0),))\n\t\tself.cameras = FoVOrthographicCameras(device=self.device, R=R, T=T, scale_xyz=scale or ((1,1,1),))\n\n\n\t# Set all necessary internal data for rendering and texture baking\n\t# Can be used to refresh after changing camera positions\n\tdef set_cameras_and_render_settings(self, camera_poses, centers=None, camera_distance=2.7, render_size=None, scale=None):\n\t\tself.set_cameras(camera_poses, centers, camera_distance, scale=scale)\n\t\tif render_size is None:\n\t\t\trender_size = self.render_size\n\t\tif not hasattr(self, \"renderer\"):\n\t\t\tself.setup_renderer(size=render_size)\n\t\tif not hasattr(self, \"mesh_d\"):\n\t\t\tself.disconnect_faces()\n\t\tif not hasattr(self, \"mesh_uv\"):\n\t\t\tself.construct_uv_mesh()\n\t\tself.calculate_tex_gradient()\n\t\tself.calculate_visible_triangle_mask()\n\t\t_,_,_,cos_maps,_, _ = self.render_geometry()\n\t\tself.calculate_cos_angle_weights(cos_maps)\n\n\n\t# Setup renderers for rendering\n\t# max faces per bin set to 30000 to avoid overflow in many test cases.\n\t# You can use default value to let pytorch3d handle that for you.\n\tdef setup_renderer(self, size=64, blur=0.0, face_per_pix=1, perspective_correct=False, channels=None):\n\t\tif not channels:\n\t\t\tchannels = self.channels\n\n\t\tself.raster_settings = RasterizationSettings(\n\t\t\timage_size=size, \n\t\t\tblur_radius=blur, \n\t\t\tfaces_per_pixel=face_per_pix,\n\t\t\tperspective_correct=perspective_correct,\n\t\t\tcull_backfaces=True,\n\t\t\tmax_faces_per_bin=30000,\n\t\t)\n\n\t\tself.renderer = MeshRenderer(\n\t\t\trasterizer=MeshRasterizer(\n\t\t\t\tcameras=self.cameras, \n\t\t\t\traster_settings=self.raster_settings,\n\n\t\t\t),\n\t\t\tshader=HardNChannelFlatShader(\n\t\t\t\tdevice=self.device, \n\t\t\t\tcameras=self.cameras,\n\t\t\t\tlights=self.lights,\n\t\t\t\tchannels=channels\n\t\t\t\t# materials=materials\n\t\t\t)\n\t\t)\n\n\n\t# Bake screen-space cosine weights to UV space\n\t# May be able to reimplement using the generic \"bake_texture\" function, but it works so leave it here for now\n\[email protected]_grad()\n\tdef calculate_cos_angle_weights(self, cos_angles, fill=True, channels=None):\n\t\tif not channels:\n\t\t\tchannels = self.channels\n\t\tcos_maps = []\n\t\ttmp_mesh = self.mesh.clone()\n\t\tfor i in range(len(self.cameras)):\n\t\t\t\n\t\t\tzero_map = torch.zeros(self.target_size+(channels,), device=self.device, requires_grad=True)\n\t\t\toptimizer = torch.optim.SGD([zero_map], lr=1, momentum=0)\n\t\t\toptimizer.zero_grad()\n\t\t\tzero_tex = TexturesUV([zero_map], self.mesh.textures.faces_uvs_padded(), self.mesh.textures.verts_uvs_padded(), sampling_mode=self.sampling_mode)\n\t\t\ttmp_mesh.textures = zero_tex\n\n\t\t\timages_predicted = self.renderer(tmp_mesh, cameras=self.cameras[i], lights=self.lights)\n\n\t\t\tloss = torch.sum((cos_angles[i,:,:,0:1]**1 - 
images_predicted)**2)\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\n\t\t\tif fill:\n\t\t\t\tzero_map = zero_map.detach() / (self.gradient_maps[i] + 1E-8)\n\t\t\t\tzero_map = voronoi_solve(zero_map, self.gradient_maps[i][...,0])\n\t\t\telse:\n\t\t\t\tzero_map = zero_map.detach() / (self.gradient_maps[i]+1E-8)\n\t\t\tcos_maps.append(zero_map)\n\t\tself.cos_maps = cos_maps\n\n\t\t\n\t# Get geometric info from fragment shader\n\t# Can be used for generating conditioning image and cosine weights\n\t# Returns some information you may not need, remember to release them for memory saving\n\[email protected]_grad()\n\tdef render_geometry(self, image_size=None):\n\t\tif image_size:\n\t\t\tsize = self.renderer.rasterizer.raster_settings.image_size\n\t\t\tself.renderer.rasterizer.raster_settings.image_size = image_size\n\t\tshader = self.renderer.shader\n\t\tself.renderer.shader = HardGeometryShader(device=self.device, cameras=self.cameras[0], lights=self.lights)\n\t\ttmp_mesh = self.mesh.clone()\n\t\t\n\t\tverts, normals, depths, cos_angles, texels, fragments = self.renderer(tmp_mesh.extend(len(self.cameras)), cameras=self.cameras, lights=self.lights)\n\t\tself.renderer.shader = shader\n\n\t\tif image_size:\n\t\t\tself.renderer.rasterizer.raster_settings.image_size = size\n\n\t\treturn verts, normals, depths, cos_angles, texels, fragments\n\n\n\t# Project world normal to view space and normalize\n\[email protected]_grad()\n\tdef decode_view_normal(self, normals):\n\t\tw2v_mat = self.cameras.get_full_projection_transform()\n\t\tnormals_view = torch.clone(normals)[:,:,:,0:3]\n\t\tnormals_view = normals_view.reshape(normals_view.shape[0], -1, 3)\n\t\tnormals_view = w2v_mat.transform_normals(normals_view)\n\t\tnormals_view = normals_view.reshape(normals.shape[0:3]+(3,))\n\t\tnormals_view[:,:,:,2] *= -1\n\t\tnormals = (normals_view[...,0:3]+1) * normals[...,3:] / 2 + torch.FloatTensor(((((0.5,0.5,1))))).to(self.device) * (1 - normals[...,3:])\n\t\t# normals = torch.cat([normal for normal in normals], dim=1)\n\t\tnormals = normals.clamp(0, 1)\n\t\treturn normals\n\n\n\t# Normalize absolute depth to inverse depth\n\[email protected]_grad()\n\tdef decode_normalized_depth(self, depths, batched_norm=False):\n\t\tview_z, mask = depths.unbind(-1)\n\t\tview_z = view_z * mask + 100 * (1-mask)\n\t\tinv_z = 1 / view_z\n\t\tinv_z_min = inv_z * mask + 100 * (1-mask)\n\t\tif not batched_norm:\n\t\t\tmax_ = torch.max(inv_z, 1, keepdim=True)\n\t\t\tmax_ = torch.max(max_[0], 2, keepdim=True)[0]\n\n\t\t\tmin_ = torch.min(inv_z_min, 1, keepdim=True)\n\t\t\tmin_ = torch.min(min_[0], 2, keepdim=True)[0]\n\t\telse:\n\t\t\tmax_ = torch.max(inv_z)\n\t\t\tmin_ = torch.min(inv_z_min)\n\t\tinv_z = (inv_z - min_) / (max_ - min_)\n\t\tinv_z = inv_z.clamp(0,1)\n\t\tinv_z = inv_z[...,None].repeat(1,1,1,3)\n\n\t\treturn inv_z\n\n\n\t# Multiple screen pixels could pass gradient to a same texel\n\t# We can precalculate this gradient strength and use it to normalize gradients when we bake textures\n\[email protected]_grad()\n\tdef calculate_tex_gradient(self, channels=None):\n\t\tif not channels:\n\t\t\tchannels = self.channels\n\t\ttmp_mesh = self.mesh.clone()\n\t\tgradient_maps = []\n\t\tfor i in range(len(self.cameras)):\n\t\t\tzero_map = torch.zeros(self.target_size+(channels,), device=self.device, requires_grad=True)\n\t\t\toptimizer = torch.optim.SGD([zero_map], lr=1, momentum=0)\n\t\t\toptimizer.zero_grad()\n\t\t\tzero_tex = TexturesUV([zero_map], self.mesh.textures.faces_uvs_padded(), self.mesh.textures.verts_uvs_padded(), 
sampling_mode=self.sampling_mode)\n\t\t\ttmp_mesh.textures = zero_tex\n\t\t\timages_predicted = self.renderer(tmp_mesh, cameras=self.cameras[i], lights=self.lights)\n\t\t\tloss = torch.sum((1 - images_predicted)**2)\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\n\t\t\tgradient_maps.append(zero_map.detach())\n\n\t\tself.gradient_maps = gradient_maps\n\n\n\t# Get the UV space masks of triangles visible in each view\n\t# First get face ids from each view, then filter pixels on UV space to generate masks\n\[email protected]_grad()\n\tdef calculate_visible_triangle_mask(self, channels=None, image_size=(512,512)):\n\t\tif not channels:\n\t\t\tchannels = self.channels\n\n\t\tpix2face_list = []\n\t\tfor i in range(len(self.cameras)):\n\t\t\tself.renderer.rasterizer.raster_settings.image_size=image_size\n\t\t\tpix2face = self.renderer.rasterizer(self.mesh_d, cameras=self.cameras[i]).pix_to_face\n\t\t\tself.renderer.rasterizer.raster_settings.image_size=self.render_size\n\t\t\tpix2face_list.append(pix2face)\n\n\t\tif not hasattr(self, \"mesh_uv\"):\n\t\t\tself.construct_uv_mesh()\n\n\t\traster_settings = RasterizationSettings(\n\t\t\timage_size=self.target_size, \n\t\t\tblur_radius=0, \n\t\t\tfaces_per_pixel=1,\n\t\t\tperspective_correct=False,\n\t\t\tcull_backfaces=False,\n\t\t\tmax_faces_per_bin=30000,\n\t\t\t)\n\n\t\tR, T = look_at_view_transform(dist=2, elev=0, azim=0)\n\t\tcameras = FoVOrthographicCameras(device=self.device, R=R, T=T)\n\n\t\trasterizer=MeshRasterizer(\n\t\t\tcameras=cameras, \n\t\t\traster_settings=raster_settings\n\t\t)\n\t\tuv_pix2face = rasterizer(self.mesh_uv).pix_to_face\n\n\t\tvisible_triangles = []\n\t\tfor i in range(len(pix2face_list)):\n\t\t\tvalid_faceid = torch.unique(pix2face_list[i])\n\t\t\tvalid_faceid = valid_faceid[1:] if valid_faceid[0]==-1 else valid_faceid\n\t\t\tmask = torch.isin(uv_pix2face[0], valid_faceid, assume_unique=False)\n\t\t\t# uv_pix2face[0][~mask] = -1\n\t\t\ttriangle_mask = torch.ones(self.target_size+(1,), device=self.device)\n\t\t\ttriangle_mask[~mask] = 0\n\t\t\t\n\t\t\ttriangle_mask[:,1:][triangle_mask[:,:-1] > 0] = 1\n\t\t\ttriangle_mask[:,:-1][triangle_mask[:,1:] > 0] = 1\n\t\t\ttriangle_mask[1:,:][triangle_mask[:-1,:] > 0] = 1\n\t\t\ttriangle_mask[:-1,:][triangle_mask[1:,:] > 0] = 1\n\t\t\tvisible_triangles.append(triangle_mask)\n\n\t\tself.visible_triangles = visible_triangles\n\n\n\n\t# Render the current mesh and texture from current cameras\n\tdef render_textured_views(self):\n\t\tmeshes = self.mesh.extend(len(self.cameras))\n\t\timages_predicted = self.renderer(meshes, cameras=self.cameras, lights=self.lights)\n\n\t\treturn [image.permute(2, 0, 1) for image in images_predicted]\n\n\n\t# Bake views into a texture\n\t# First bake into individual textures then combine based on cosine weight\n\[email protected]_grad()\n\tdef bake_texture(self, views=None, main_views=[], cos_weighted=True, channels=None, exp=None, noisy=False, generator=None):\n\t\tif not exp:\n\t\t\texp=1\n\t\tif not channels:\n\t\t\tchannels = self.channels\n\t\tviews = [view.permute(1, 2, 0) for view in views]\n\n\t\ttmp_mesh = self.mesh\n\t\tbake_maps = [torch.zeros(self.target_size+(views[0].shape[2],), device=self.device, requires_grad=True) for view in views]\n\t\toptimizer = torch.optim.SGD(bake_maps, lr=1, momentum=0)\n\t\toptimizer.zero_grad()\n\t\tloss = 0\n\t\tfor i in range(len(self.cameras)): \n\t\t\tbake_tex = TexturesUV([bake_maps[i]], tmp_mesh.textures.faces_uvs_padded(), tmp_mesh.textures.verts_uvs_padded(), 
sampling_mode=self.sampling_mode)\n\t\t\ttmp_mesh.textures = bake_tex\n\t\t\timages_predicted = self.renderer(tmp_mesh, cameras=self.cameras[i], lights=self.lights, device=self.device)\n\t\t\tpredicted_rgb = images_predicted[..., :-1]\n\t\t\tloss += (((predicted_rgb[...] - views[i]))**2).sum()\n\t\tloss.backward(retain_graph=False)\n\t\toptimizer.step()\n\n\t\ttotal_weights = 0\n\t\tbaked = 0\n\t\tfor i in range(len(bake_maps)):\n\t\t\tnormalized_baked_map = bake_maps[i].detach() / (self.gradient_maps[i] + 1E-8)\n\t\t\tbake_map = voronoi_solve(normalized_baked_map, self.gradient_maps[i][...,0])\n\t\t\tweight = self.visible_triangles[i] * (self.cos_maps[i]) ** exp\n\t\t\tif noisy:\n\t\t\t\tnoise = torch.rand(weight.shape[:-1]+(1,), generator=generator).type(weight.dtype).to(weight.device)\n\t\t\t\tweight *= noise\n\t\t\ttotal_weights += weight\n\t\t\tbaked += bake_map * weight\n\t\tbaked /= total_weights + 1E-8\n\t\tbaked = voronoi_solve(baked, total_weights[...,0])\n\n\t\tbake_tex = TexturesUV([baked], tmp_mesh.textures.faces_uvs_padded(), tmp_mesh.textures.verts_uvs_padded(), sampling_mode=self.sampling_mode)\n\t\ttmp_mesh.textures = bake_tex\n\t\textended_mesh = tmp_mesh.extend(len(self.cameras))\n\t\timages_predicted = self.renderer(extended_mesh, cameras=self.cameras, lights=self.lights)\n\t\tlearned_views = [image.permute(2, 0, 1) for image in images_predicted]\n\n\t\treturn learned_views, baked.permute(2, 0, 1), total_weights.permute(2, 0, 1)\n\n\n\t# Move the internel data to a specific device\n\tdef to(self, device):\n\t\tfor mesh_name in [\"mesh\", \"mesh_d\", \"mesh_uv\"]:\n\t\t\tif hasattr(self, mesh_name):\n\t\t\t\tmesh = getattr(self, mesh_name)\n\t\t\t\tsetattr(self, mesh_name, mesh.to(device))\n\t\tfor list_name in [\"visible_triangles\", \"visibility_maps\", \"cos_maps\"]:\n\t\t\tif hasattr(self, list_name):\n\t\t\t\tmap_list = getattr(self, list_name)\n\t\t\t\tfor i in range(len(map_list)):\n\t\t\t\t\tmap_list[i] = map_list[i].to(device)" }, { "identifier": "SamplewiseAttnProcessor2_0", "path": "src/syncmvd/attention.py", "snippet": "class SamplewiseAttnProcessor2_0:\n\tr\"\"\"\n\tProcessor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).\n\t\"\"\"\n\n\tdef __init__(self, custom_attention_mask=None, ref_attention_mask=None, ref_weight=0):\n\t\tif not hasattr(F, \"scaled_dot_product_attention\"):\n\t\t\traise ImportError(\"AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.\")\n\t\tself.ref_weight = ref_weight\n\t\tself.custom_attention_mask = custom_attention_mask\n\t\tself.ref_attention_mask = ref_attention_mask\n\n\tdef __call__(\n\t\tself,\n\t\tattn: Attention,\n\t\thidden_states,\n\t\tencoder_hidden_states=None,\n\t\tattention_mask=None,\n\t\ttemb=None,\n\t):\n\n\t\tresidual = hidden_states\n\n\t\tif attn.spatial_norm is not None:\n\t\t\thidden_states = attn.spatial_norm(hidden_states, temb)\n\n\t\tinput_ndim = hidden_states.ndim\n\n\n\t\tif input_ndim == 4:\n\t\t\tbatch_size, channel, height, width = hidden_states.shape\n\t\t\thidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)\n\n\t\tbatch_size, sequence_length, channels = (\n\t\t\thidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape\n\t\t)\n\n\t\tif attention_mask is not None:\n\t\t\tattention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)\n\t\t\t# scaled_dot_product_attention expects attention_mask shape to be\n\t\t\t# (batch, heads, 
source_length, target_length)\n\t\t\tattention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])\n\n\t\tif attn.group_norm is not None:\n\t\t\thidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)\n\n\t\tquery = attn.to_q(hidden_states)\n\n\t\tif encoder_hidden_states is None:\n\t\t\tencoder_hidden_states = torch.clone(hidden_states)\n\t\telif attn.norm_cross:\n\t\t\tencoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)\n\n\n\t\t'''\n\t\t\treshape encoder hidden state to a single batch\n\t\t'''\n\t\tencoder_hidden_states_f = encoder_hidden_states.reshape(1, -1, channels)\n\n\n\n\t\tkey = attn.to_k(encoder_hidden_states)\n\t\tvalue = attn.to_v(encoder_hidden_states)\n\n\t\tinner_dim = key.shape[-1]\n\t\thead_dim = inner_dim // attn.heads\n\n\t\tquery = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)\n\n\t\t'''\n\t\t\teach time select 1 sample from q and compute with concated kv\n\t\t\tconcat result hidden states afterwards\n\t\t'''\n\t\thidden_state_list = []\n\n\t\tfor b_idx in range(batch_size):\n\t\t\t\n\t\t\tquery_b = query[b_idx:b_idx+1]\n\n\t\t\tif self.ref_weight > 0 or True:\n\t\t\t\tkey_ref = key.clone()\n\t\t\t\tvalue_ref = value.clone()\n\n\t\t\t\tkeys = [key_ref[view_idx] for view_idx in self.ref_attention_mask]\n\t\t\t\tvalues = [value_ref[view_idx] for view_idx in self.ref_attention_mask]\n\n\t\t\t\tkey_ref = torch.stack(keys)\n\t\t\t\tkey_ref = key_ref.view(key_ref.shape[0], -1, attn.heads, head_dim).permute(2, 0, 1, 3).contiguous().view(attn.heads, -1, head_dim)[None,...]\n\n\t\t\t\tvalue_ref = torch.stack(values)\n\t\t\t\tvalue_ref = value_ref.view(value_ref.shape[0], -1, attn.heads, head_dim).permute(2, 0, 1, 3).contiguous().view(attn.heads, -1, head_dim)[None,...]\n\n\t\t\tkey_a = key.clone()\n\t\t\tvalue_a = value.clone()\n\n\t\t\t# key_a = key_a[max(0,b_idx-1):min(b_idx+1,batch_size)+1]\n\n\t\t\tkeys = [key_a[view_idx] for view_idx in self.custom_attention_mask[b_idx]]\n\t\t\tvalues = [value_a[view_idx] for view_idx in self.custom_attention_mask[b_idx]]\n\n\t\t\t# keys = (key_a[b_idx-1], key_a[b_idx], key_a[(b_idx+1)%batch_size])\n\t\t\t# values = (value_a[b_idx-1], value_a[b_idx], value_a[(b_idx+1)%batch_size])\n\t\t\t\n\t\t\t# if b_idx not in [0, batch_size-1, batch_size//2]:\n\t\t\t# \tkeys = keys + (key_a[min(batch_size-2, 2*(batch_size//2) - b_idx)],)\n\t\t\t# \tvalues = values + (value_a[min(batch_size-2, 2*(batch_size//2) - b_idx)],)\n\t\t\tkey_a = torch.stack(keys)\n\t\t\tkey_a = key_a.view(key_a.shape[0], -1, attn.heads, head_dim).permute(2, 0, 1, 3).contiguous().view(attn.heads, -1, head_dim)[None,...]\n\n\t\t\t# value_a = value_a[max(0,b_idx-1):min(b_idx+1,batch_size)+1]\n\t\t\tvalue_a = torch.stack(values)\n\t\t\tvalue_a = value_a.view(value_a.shape[0], -1, attn.heads, head_dim).permute(2, 0, 1, 3).contiguous().view(attn.heads, -1, head_dim)[None,...]\n\n\t\t\thidden_state_a = F.scaled_dot_product_attention(\n\t\t\t\tquery_b, key_a, value_a, attn_mask=None, dropout_p=0.0, is_causal=False\n\t\t\t)\n\n\t\t\tif self.ref_weight > 0 or True:\n\t\t\t\thidden_state_ref = F.scaled_dot_product_attention(\n\t\t\t\t\tquery_b, key_ref, value_ref, attn_mask=None, dropout_p=0.0, is_causal=False\n\t\t\t\t)\n\n\t\t\t\thidden_state = (hidden_state_a + self.ref_weight * hidden_state_ref) / (1+self.ref_weight)\n\t\t\telse:\n\t\t\t\thidden_state = hidden_state_a\n\n\t\t\t# the output of sdp = (batch, num_heads, seq_len, head_dim)\n\t\t\t# TODO: add support for attn.scale when we 
move to Torch 2.1\n\t\t\t\n\t\t\thidden_state_list.append(hidden_state)\n\n\t\thidden_states = torch.cat(hidden_state_list)\n\n\n\t\thidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)\n\t\thidden_states = hidden_states.to(query.dtype)\n\n\t\t# linear proj\n\t\thidden_states = attn.to_out[0](hidden_states)\n\t\t# dropout\n\t\thidden_states = attn.to_out[1](hidden_states)\n\n\t\tif input_ndim == 4:\n\t\t\thidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)\n\n\t\tif attn.residual_connection:\n\t\t\thidden_states = hidden_states + residual\n\n\t\thidden_states = hidden_states / attn.rescale_output_factor\n\n\t\treturn hidden_states" }, { "identifier": "replace_attention_processors", "path": "src/syncmvd/attention.py", "snippet": "def replace_attention_processors(module, processor, attention_mask=None, ref_attention_mask=None, ref_weight=0):\n\tattn_processors = module.attn_processors\n\tfor k, v in attn_processors.items():\n\t\tif \"attn1\" in k:\n\t\t\tattn_processors[k] = processor(custom_attention_mask=attention_mask, ref_attention_mask=ref_attention_mask, ref_weight=ref_weight)\n\tmodule.set_attn_processor(attn_processors)" }, { "identifier": "step_tex", "path": "src/syncmvd/step.py", "snippet": "@torch.no_grad()\ndef step_tex(\n\t\tscheduler,\n\t\tuvp,\n\t\tmodel_output: torch.FloatTensor,\n\t\ttimestep: int,\n\t\tsample: torch.FloatTensor,\n\t\ttexture: None,\n\t\tgenerator=None,\n\t\treturn_dict: bool = True,\n\t\tguidance_scale = 1,\n\t\tmain_views = [],\n\t\thires_original_views = True,\n\t\texp=None,\n\t\tcos_weighted=True\n):\n\tt = timestep\n\n\tprev_t = scheduler.previous_timestep(t)\n\n\tif model_output.shape[1] == sample.shape[1] * 2 and scheduler.variance_type in [\"learned\", \"learned_range\"]:\n\t\tmodel_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)\n\telse:\n\t\tpredicted_variance = None\n\n\t# 1. compute alphas, betas\n\talpha_prod_t = scheduler.alphas_cumprod[t]\n\talpha_prod_t_prev = scheduler.alphas_cumprod[prev_t] if prev_t >= 0 else scheduler.one\n\tbeta_prod_t = 1 - alpha_prod_t\n\tbeta_prod_t_prev = 1 - alpha_prod_t_prev\n\tcurrent_alpha_t = alpha_prod_t / alpha_prod_t_prev\n\tcurrent_beta_t = 1 - current_alpha_t\n\n\t# 2. compute predicted original sample from predicted noise also called\n\t# \"predicted x_0\" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf\n\tif scheduler.config.prediction_type == \"epsilon\":\n\t\tpred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)\n\telif scheduler.config.prediction_type == \"sample\":\n\t\tpred_original_sample = model_output\n\telif scheduler.config.prediction_type == \"v_prediction\":\n\t\tpred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output\n\telse:\n\t\traise ValueError(\n\t\t\tf\"prediction_type given as {scheduler.config.prediction_type} must be one of `epsilon`, `sample` or\"\n\t\t\t\" `v_prediction` for the DDPMScheduler.\"\n\t\t)\n\n\t# 3. Clip or threshold \"predicted x_0\"\n\tif scheduler.config.thresholding:\n\t\tpred_original_sample = scheduler._threshold_sample(pred_original_sample)\n\telif scheduler.config.clip_sample:\n\t\tpred_original_sample = pred_original_sample.clamp(\n\t\t\t-scheduler.config.clip_sample_range, scheduler.config.clip_sample_range\n\t\t)\n\n\t# 4. 
Compute coefficients for pred_original_sample x_0 and current sample x_t\n\t# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf\n\tpred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * current_beta_t) / beta_prod_t\n\tcurrent_sample_coeff = current_alpha_t ** (0.5) * beta_prod_t_prev / beta_prod_t\n\n\t'''\n\t\tAdd multidiffusion here\n\t'''\n\n\tif texture is None:\n\t\tsample_views = [view for view in sample]\n\t\tsample_views, texture, _ = uvp.bake_texture(views=sample_views, main_views=main_views, exp=exp)\n\t\tsample_views = torch.stack(sample_views, axis=0)[:,:-1,...]\n\n\n\toriginal_views = [view for view in pred_original_sample]\n\toriginal_views, original_tex, visibility_weights = uvp.bake_texture(views=original_views, main_views=main_views, exp=exp)\n\tuvp.set_texture_map(original_tex)\n\toriginal_views = uvp.render_textured_views()\n\toriginal_views = torch.stack(original_views, axis=0)[:,:-1,...]\n\n\t# 5. Compute predicted previous sample µ_t\n\t# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf\n\t# pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample\n\tprev_tex = pred_original_sample_coeff * original_tex + current_sample_coeff * texture\n\n\t# 6. Add noise\n\tvariance = 0\n\n\tif predicted_variance is not None:\n\t\tvariance_views = [view for view in predicted_variance]\n\t\tvariance_views, variance_tex, visibility_weights = uvp.bake_texture(views=variance_views, main_views=main_views, cos_weighted=cos_weighted, exp=exp)\n\t\tvariance_views = torch.stack(variance_views, axis=0)[:,:-1,...]\n\telse:\n\t\tvariance_tex = None\n\n\tif t > 0:\n\t\tdevice = texture.device\n\t\tvariance_noise = randn_tensor(\n\t\t\ttexture.shape, generator=generator, device=device, dtype=texture.dtype\n\t\t)\n\t\tif scheduler.variance_type == \"fixed_small_log\":\n\t\t\tvariance = scheduler._get_variance(t, predicted_variance=variance_tex) * variance_noise\n\t\telif scheduler.variance_type == \"learned_range\":\n\t\t\tvariance = scheduler._get_variance(t, predicted_variance=variance_tex)\n\t\t\tvariance = torch.exp(0.5 * variance) * variance_noise\n\t\telse:\n\t\t\tvariance = (scheduler._get_variance(t, predicted_variance=variance_tex) ** 0.5) * variance_noise\n\n\tprev_tex = prev_tex + variance\n\n\tuvp.set_texture_map(prev_tex)\n\tprev_views = uvp.render_textured_views()\n\tpred_prev_sample = torch.clone(sample)\n\tfor i, view in enumerate(prev_views):\n\t\tpred_prev_sample[i] = view[:-1]\n\tmasks = [view[-1:] for view in prev_views]\n\n\treturn {\"prev_sample\": pred_prev_sample, \"pred_original_sample\":pred_original_sample, \"prev_tex\": prev_tex}\n\n\tif not return_dict:\n\t\treturn pred_prev_sample, pred_original_sample\n\tpass" } ]
import os
import numpy as np
import math
import random
import torch
import select
import sys
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from PIL import Image
from IPython.display import display
from torch import functional as F
from torch import nn
from torchvision.transforms import Compose, Resize, GaussianBlur, InterpolationMode
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
from diffusers import DDPMScheduler, DDIMScheduler, UniPCMultistepScheduler
from diffusers.models import AutoencoderKL, ControlNetModel, UNet2DConditionModel
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import (
    BaseOutput,
    randn_tensor,
    numpy_to_pil,
    pt_to_pil,
    # make_image_grid,
    is_accelerate_available,
    is_accelerate_version,
    is_compiled_module,
    logging,
    randn_tensor,
    replace_example_docstring
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.models.attention_processor import Attention, AttentionProcessor
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from .renderer.project import UVProjection as UVP
from .syncmvd.attention import SamplewiseAttnProcessor2_0, replace_attention_processors
from .syncmvd.prompt import *
from .syncmvd.step import step_tex
from .utils import *
10,786
group_sets.append((group, ref_group)) group_metas = [] for group, ref_group in group_sets: in_mask = sorted(list(group | ref_group)) out_mask = [] group_attention_masks = [] for idx in in_mask: if idx in group: out_mask.append(in_mask.index(idx)) group_attention_masks.append([in_mask.index(idxx) for idxx in attention_mask[idx] if idxx in in_mask]) ref_attention_mask = [in_mask.index(idx) for idx in ref_view] group_metas.append([in_mask, out_mask, group_attention_masks, ref_attention_mask]) return group_metas ''' MultiView-Diffusion Stable-Diffusion Pipeline Modified from a Diffusers StableDiffusionControlNetPipeline Just mimic the pipeline structure but did not follow any API convention ''' class StableSyncMVDPipeline(StableDiffusionControlNetPipeline): def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel]], scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = False, ): super().__init__( vae, text_encoder, tokenizer, unet, controlnet, scheduler, safety_checker, feature_extractor, requires_safety_checker ) self.scheduler = DDPMScheduler.from_config(self.scheduler.config) self.model_cpu_offload_seq = "vae->text_encoder->unet->vae" self.enable_model_cpu_offload() self.enable_vae_slicing() self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) def initialize_pipeline( self, mesh_path=None, mesh_transform=None, mesh_autouv=None, camera_azims=None, camera_centers=None, top_cameras=True, ref_views=[], latent_size=None, render_rgb_size=None, texture_size=None, texture_rgb_size=None, max_batch_size=24, logging_config=None, ): # Make output dir output_dir = logging_config["output_dir"] self.result_dir = f"{output_dir}/results" self.intermediate_dir = f"{output_dir}/intermediate" dirs = [output_dir, self.result_dir, self.intermediate_dir] for dir_ in dirs: if not os.path.isdir(dir_): os.mkdir(dir_) # Define the cameras for rendering self.camera_poses = [] self.attention_mask=[] self.centers = camera_centers cam_count = len(camera_azims) front_view_diff = 360 back_view_diff = 360 front_view_idx = 0 back_view_idx = 0 for i, azim in enumerate(camera_azims): if azim < 0: azim += 360 self.camera_poses.append((0, azim)) self.attention_mask.append([(cam_count+i-1)%cam_count, i, (i+1)%cam_count]) if abs(azim) < front_view_diff: front_view_idx = i front_view_diff = abs(azim) if abs(azim - 180) < back_view_diff: back_view_idx = i back_view_diff = abs(azim - 180) # Add two additional cameras for painting the top surfaces if top_cameras: self.camera_poses.append((30, 0)) self.camera_poses.append((30, 180)) self.attention_mask.append([front_view_idx, cam_count]) self.attention_mask.append([back_view_idx, cam_count+1]) # Reference view for attention (all views attend the the views in this list) # A forward view will be used if not specified if len(ref_views) == 0: ref_views = [front_view_idx] # Calculate in-group attention mask self.group_metas = split_groups(self.attention_mask, max_batch_size, ref_views) # Set up pytorch3D for projection between screen space and UV space # uvp is for latent and uvp_rgb for rgb color
if torch.cuda.is_available(): device = torch.device("cuda:0") torch.cuda.set_device(device) else: device = torch.device("cpu") # Background colors color_constants = {"black": [-1, -1, -1], "white": [1, 1, 1], "maroon": [0, -1, -1], "red": [1, -1, -1], "olive": [0, 0, -1], "yellow": [1, 1, -1], "green": [-1, 0, -1], "lime": [-1 ,1, -1], "teal": [-1, 0, 0], "aqua": [-1, 1, 1], "navy": [-1, -1, 0], "blue": [-1, -1, 1], "purple": [0, -1 , 0], "fuchsia": [1, -1, 1]} color_names = list(color_constants.keys()) # Used to generate depth or normal conditioning images @torch.no_grad() def get_conditioning_images(uvp, output_size, render_size=512, blur_filter=5, cond_type="normal"): verts, normals, depths, cos_maps, texels, fragments = uvp.render_geometry(image_size=render_size) masks = normals[...,3][:,None,...] masks = Resize((output_size//8,)*2, antialias=True)(masks) normals_transforms = Compose([ Resize((output_size,)*2, interpolation=InterpolationMode.BILINEAR, antialias=True), GaussianBlur(blur_filter, blur_filter//3+1)] ) if cond_type == "normal": view_normals = uvp.decode_view_normal(normals).permute(0,3,1,2) *2 - 1 conditional_images = normals_transforms(view_normals) # Some problem here, depth controlnet don't work when depth is normalized # But it do generate using the unnormalized form as below elif cond_type == "depth": view_depths = uvp.decode_normalized_depth(depths).permute(0,3,1,2) conditional_images = normals_transforms(view_depths) return conditional_images, masks # Revert time 0 background to time t to composite with time t foreground @torch.no_grad() def composite_rendered_view(scheduler, backgrounds, foregrounds, masks, t): composited_images = [] for i, (background, foreground, mask) in enumerate(zip(backgrounds, foregrounds, masks)): if t > 0: alphas_cumprod = scheduler.alphas_cumprod[t] noise = torch.normal(0, 1, background.shape, device=background.device) background = (1-alphas_cumprod) * noise + alphas_cumprod * background composited = foreground * mask + background * (1-mask) composited_images.append(composited) composited_tensor = torch.stack(composited_images) return composited_tensor # Split into micro-batches to use less memory in each unet prediction # But need more investigation on reducing memory usage # Assume it has no possitive effect and use a large "max_batch_size" to skip splitting def split_groups(attention_mask, max_batch_size, ref_view=[]): group_sets = [] group = set() ref_group = set() idx = 0 while idx < len(attention_mask): new_group = group | set([idx]) new_ref_group = (ref_group | set(attention_mask[idx] + ref_view)) - new_group if len(new_group) + len(new_ref_group) <= max_batch_size: group = new_group ref_group = new_ref_group idx += 1 else: assert len(group) != 0, "Cannot fit into a group" group_sets.append((group, ref_group)) group = set() ref_group = set() if len(group)>0: group_sets.append((group, ref_group)) group_metas = [] for group, ref_group in group_sets: in_mask = sorted(list(group | ref_group)) out_mask = [] group_attention_masks = [] for idx in in_mask: if idx in group: out_mask.append(in_mask.index(idx)) group_attention_masks.append([in_mask.index(idxx) for idxx in attention_mask[idx] if idxx in in_mask]) ref_attention_mask = [in_mask.index(idx) for idx in ref_view] group_metas.append([in_mask, out_mask, group_attention_masks, ref_attention_mask]) return group_metas ''' MultiView-Diffusion Stable-Diffusion Pipeline Modified from a Diffusers StableDiffusionControlNetPipeline Just mimic the pipeline structure but did not follow any API 
convention ''' class StableSyncMVDPipeline(StableDiffusionControlNetPipeline): def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel]], scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = False, ): super().__init__( vae, text_encoder, tokenizer, unet, controlnet, scheduler, safety_checker, feature_extractor, requires_safety_checker ) self.scheduler = DDPMScheduler.from_config(self.scheduler.config) self.model_cpu_offload_seq = "vae->text_encoder->unet->vae" self.enable_model_cpu_offload() self.enable_vae_slicing() self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) def initialize_pipeline( self, mesh_path=None, mesh_transform=None, mesh_autouv=None, camera_azims=None, camera_centers=None, top_cameras=True, ref_views=[], latent_size=None, render_rgb_size=None, texture_size=None, texture_rgb_size=None, max_batch_size=24, logging_config=None, ): # Make output dir output_dir = logging_config["output_dir"] self.result_dir = f"{output_dir}/results" self.intermediate_dir = f"{output_dir}/intermediate" dirs = [output_dir, self.result_dir, self.intermediate_dir] for dir_ in dirs: if not os.path.isdir(dir_): os.mkdir(dir_) # Define the cameras for rendering self.camera_poses = [] self.attention_mask=[] self.centers = camera_centers cam_count = len(camera_azims) front_view_diff = 360 back_view_diff = 360 front_view_idx = 0 back_view_idx = 0 for i, azim in enumerate(camera_azims): if azim < 0: azim += 360 self.camera_poses.append((0, azim)) self.attention_mask.append([(cam_count+i-1)%cam_count, i, (i+1)%cam_count]) if abs(azim) < front_view_diff: front_view_idx = i front_view_diff = abs(azim) if abs(azim - 180) < back_view_diff: back_view_idx = i back_view_diff = abs(azim - 180) # Add two additional cameras for painting the top surfaces if top_cameras: self.camera_poses.append((30, 0)) self.camera_poses.append((30, 180)) self.attention_mask.append([front_view_idx, cam_count]) self.attention_mask.append([back_view_idx, cam_count+1]) # Reference view for attention (all views attend the the views in this list) # A forward view will be used if not specified if len(ref_views) == 0: ref_views = [front_view_idx] # Calculate in-group attention mask self.group_metas = split_groups(self.attention_mask, max_batch_size, ref_views) # Set up pytorch3D for projection between screen space and UV space # uvp is for latent and uvp_rgb for rgb color
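get_conditioning_images in the code above prepares ControlNet conditioning by resizing and lightly blurring the rendered normal (or depth) maps. A standalone sketch of that transform on toy tensors; the shapes and random input are illustrative only.

import torch
from torchvision.transforms import Compose, Resize, GaussianBlur, InterpolationMode

output_size, blur_filter = 512, 5
normals_transforms = Compose([
    Resize((output_size,) * 2, interpolation=InterpolationMode.BILINEAR, antialias=True),
    GaussianBlur(blur_filter, blur_filter // 3 + 1),
])
view_normals = torch.rand(4, 3, 256, 256) * 2 - 1   # stand-in for decoded view normals in [-1, 1]
conditional_images = normals_transforms(view_normals)
print(conditional_images.shape)                      # torch.Size([4, 3, 512, 512])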
self.uvp = UVP(texture_size=texture_size, render_size=latent_size, sampling_mode="nearest", channels=4, device=self._execution_device)
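composite_rendered_view in the code above pulls a clean background latent back to timestep t before compositing it with the foreground. A minimal sketch of that re-noising blend, assuming a freshly constructed DDPMScheduler and toy latents (both are stand-ins, not values from the record).

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
t = 500
background = torch.zeros(1, 4, 64, 64)   # hypothetical clean background latent
noise = torch.randn_like(background)
a = scheduler.alphas_cumprod[t]
# same blend as composite_rendered_view above: push the background to timestep t
noisy_background = (1 - a) * noise + a * background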
0
2023-12-09 03:27:58+00:00
12k
jinxixiang/magic_animate_unofficial
animatediff/magic_animate/unet.py
[ { "identifier": "CrossAttnDownBlock3D", "path": "animatediff/magic_animate/unet_3d_blocks.py", "snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n downsample_padding=1,\n add_downsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n \n use_motion_module=None,\n\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n out_channels // attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):\n output_states = ()\n\n for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n )[0]\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n 
\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n \n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states" }, { "identifier": "CrossAttnUpBlock3D", "path": "animatediff/magic_animate/unet_3d_blocks.py", "snippet": "class CrossAttnUpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n add_upsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n \n use_motion_module=None,\n\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n out_channels // attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n res_hidden_states_tuple,\n temb=None,\n encoder_hidden_states=None,\n upsample_size=None,\n attention_mask=None,\n ):\n for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):\n # pop res hidden states\n res_hidden_states = 
res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n )[0]\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n \n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n \n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states" }, { "identifier": "DownBlock3D", "path": "animatediff/magic_animate/unet_3d_blocks.py", "snippet": "class DownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_downsample=True,\n downsample_padding=1,\n \n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None):\n output_states = ()\n\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n if self.training and self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n if motion_module is not None:\n hidden_states = 
torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n else:\n hidden_states = resnet(hidden_states, temb)\n\n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states" }, { "identifier": "UNetMidBlock3DCrossAttn", "path": "animatediff/magic_animate/unet_3d_blocks.py", "snippet": "class UNetMidBlock3DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n dual_cross_attention=False,\n use_linear_projection=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n\n use_motion_module=None,\n \n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n ]\n attentions = []\n motion_modules = []\n\n for _ in range(num_layers):\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n in_channels // attn_num_head_channels,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=in_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):\n hidden_states = self.resnets[0](hidden_states, temb)\n for attn, resnet, motion_module in zip(self.attentions, self.resnets[1:], self.motion_modules):\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n 
hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n hidden_states = resnet(hidden_states, temb)\n\n return hidden_states" }, { "identifier": "UpBlock3D", "path": "animatediff/magic_animate/unet_3d_blocks.py", "snippet": "class UpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n prev_output_channel: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_upsample=True,\n\n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, encoder_hidden_states=None,):\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states" }, { "identifier": "get_down_block", "path": "animatediff/magic_animate/unet_3d_blocks.py", "snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n 
only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n \n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n \n use_motion_module=None,\n \n motion_module_type=None,\n motion_module_kwargs=None,\n):\n down_block_type = down_block_type[7:] if down_block_type.startswith(\"UNetRes\") else down_block_type\n if down_block_type == \"DownBlock3D\":\n return DownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif down_block_type == \"CrossAttnDownBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock3D\")\n return CrossAttnDownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{down_block_type} does not exist.\")" }, { "identifier": "get_up_block", "path": "animatediff/magic_animate/unet_3d_blocks.py", "snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n \n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n):\n up_block_type = up_block_type[7:] if up_block_type.startswith(\"UNetRes\") else up_block_type\n if up_block_type == \"UpBlock3D\":\n return UpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif up_block_type == \"CrossAttnUpBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock3D\")\n return CrossAttnUpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n 
prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{up_block_type} does not exist.\")" }, { "identifier": "InflatedConv3d", "path": "animatediff/magic_animate/resnet.py", "snippet": "class InflatedConv3d(nn.Conv2d):\n def forward(self, x):\n video_length = x.shape[2]\n\n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f) c h w -> b c f h w\", f=video_length)\n\n return x" } ]
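The InflatedConv3d snippet at the end of this context list runs a plain 2D convolution over video tensors by folding the frame axis into the batch axis. A standalone toy sketch of that inflation trick; the shapes are illustrative and the explicit einops import is an assumption.

import torch
import torch.nn as nn
from einops import rearrange

conv2d = nn.Conv2d(4, 8, kernel_size=3, padding=1)
x = torch.randn(2, 4, 16, 32, 32)                   # (batch, channels, frames, height, width)
frames = x.shape[2]
x = rearrange(x, "b c f h w -> (b f) c h w")        # fold frames into the batch dim
x = conv2d(x)                                        # ordinary 2D conv per frame
x = rearrange(x, "(b f) c h w -> b c f h w", f=frames)
print(x.shape)                                       # torch.Size([2, 8, 16, 32, 32])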
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from .unet_3d_blocks import (
    CrossAttnDownBlock3D,
    CrossAttnUpBlock3D,
    DownBlock3D,
    UNetMidBlock3DCrossAttn,
    UpBlock3D,
    get_down_block,
    get_up_block,
)
from .resnet import InflatedConv3d
from diffusers.utils import WEIGHTS_NAME
import os
import json
import pdb
import torch
import torch.nn as nn
import torch.utils.checkpoint
9,127
# up reversed_block_out_channels = list(reversed(block_out_channels)) reversed_attention_head_dim = list(reversed(attention_head_dim)) only_cross_attention = list(reversed(only_cross_attention)) output_channel = reversed_block_out_channels[0] for i, up_block_type in enumerate(up_block_types): res = 2 ** (3 - i) is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] # add upsample block for all BUT final layer if not is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False up_block = get_up_block( up_block_type, num_layers=layers_per_block + 1, in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=time_embed_dim, add_upsample=add_upsample, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attn_num_head_channels=reversed_attention_head_dim[i], dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, unet_use_cross_frame_attention=unet_use_cross_frame_attention, unet_use_temporal_attention=unet_use_temporal_attention, use_motion_module=use_motion_module and (res in motion_module_resolutions), motion_module_type=motion_module_type, motion_module_kwargs=motion_module_kwargs, ) self.up_blocks.append(up_block) prev_output_channel = output_channel # out self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps) self.conv_act = nn.SiLU() self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1) def set_attention_slice(self, slice_size): r""" Enable sliced attention computation. When this option is enabled, the attention module will split the input tensor in slices, to compute attention in several steps. This is useful to save some memory in exchange for a small speed decrease. Args: slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If `"max"`, maxium amount of memory will be saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`. 
""" sliceable_head_dims = [] def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module): if hasattr(module, "set_attention_slice"): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_slicable_dims(child) # retrieve number of attention layers for module in self.children(): fn_recursive_retrieve_slicable_dims(module) num_slicable_layers = len(sliceable_head_dims) if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == "max": # make smallest slice possible slice_size = num_slicable_layers * [1] slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError( f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." ) for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f"size {size} has to be smaller or equal to {dim}.") # Recursively walk through all the children. # Any children which exposes the set_attention_slice method # gets the message def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, "set_attention_slice"): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def _set_gradient_checkpointing(self, module, value=False):
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/guoyww/AnimateDiff # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class UNet3DConditionOutput(BaseOutput): sample: torch.FloatTensor class UNet3DConditionModel(ModelMixin, ConfigMixin): _supports_gradient_checkpointing = True @register_to_config def __init__( self, sample_size: Optional[int] = None, in_channels: int = 4, out_channels: int = 4, center_input_sample: bool = False, flip_sin_to_cos: bool = True, freq_shift: int = 0, down_block_types: Tuple[str] = ( "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D", ), mid_block_type: str = "UNetMidBlock3DCrossAttn", up_block_types: Tuple[str] = ( "UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D" ), only_cross_attention: Union[bool, Tuple[bool]] = False, block_out_channels: Tuple[int] = (320, 640, 1280, 1280), layers_per_block: int = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, act_fn: str = "silu", norm_num_groups: int = 32, norm_eps: float = 1e-5, cross_attention_dim: int = 1280, attention_head_dim: Union[int, Tuple[int]] = 8, dual_cross_attention: bool = False, use_linear_projection: bool = False, class_embed_type: Optional[str] = None, num_class_embeds: Optional[int] = None, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", # Additional use_motion_module = False, motion_module_resolutions = ( 1,2,4,8 ), motion_module_mid_block = False, motion_module_decoder_only = False, motion_module_type = None, motion_module_kwargs = {}, unet_use_cross_frame_attention = None, unet_use_temporal_attention = None, ): super().__init__() self.sample_size = sample_size time_embed_dim = block_out_channels[0] * 4 # input self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1)) # time self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) # class embedding if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == "timestep": self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) elif class_embed_type == "identity": self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) else: self.class_embedding = None self.down_blocks = nn.ModuleList([]) self.mid_block = None self.up_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): only_cross_attention = 
[only_cross_attention] * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): res = 2 ** i input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attn_num_head_channels=attention_head_dim[i], downsample_padding=downsample_padding, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, unet_use_cross_frame_attention=unet_use_cross_frame_attention, unet_use_temporal_attention=unet_use_temporal_attention, use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only), motion_module_type=motion_module_type, motion_module_kwargs=motion_module_kwargs, ) self.down_blocks.append(down_block) # mid if mid_block_type == "UNetMidBlock3DCrossAttn": self.mid_block = UNetMidBlock3DCrossAttn( in_channels=block_out_channels[-1], temb_channels=time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim, attn_num_head_channels=attention_head_dim[-1], resnet_groups=norm_num_groups, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, unet_use_cross_frame_attention=unet_use_cross_frame_attention, unet_use_temporal_attention=unet_use_temporal_attention, use_motion_module=use_motion_module and motion_module_mid_block, motion_module_type=motion_module_type, motion_module_kwargs=motion_module_kwargs, ) else: raise ValueError(f"unknown mid_block_type : {mid_block_type}") # count how many layers upsample the videos self.num_upsamplers = 0 # up reversed_block_out_channels = list(reversed(block_out_channels)) reversed_attention_head_dim = list(reversed(attention_head_dim)) only_cross_attention = list(reversed(only_cross_attention)) output_channel = reversed_block_out_channels[0] for i, up_block_type in enumerate(up_block_types): res = 2 ** (3 - i) is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] # add upsample block for all BUT final layer if not is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False up_block = get_up_block( up_block_type, num_layers=layers_per_block + 1, in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=time_embed_dim, add_upsample=add_upsample, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attn_num_head_channels=reversed_attention_head_dim[i], dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, 
resnet_time_scale_shift=resnet_time_scale_shift, unet_use_cross_frame_attention=unet_use_cross_frame_attention, unet_use_temporal_attention=unet_use_temporal_attention, use_motion_module=use_motion_module and (res in motion_module_resolutions), motion_module_type=motion_module_type, motion_module_kwargs=motion_module_kwargs, ) self.up_blocks.append(up_block) prev_output_channel = output_channel # out self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps) self.conv_act = nn.SiLU() self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1) def set_attention_slice(self, slice_size): r""" Enable sliced attention computation. When this option is enabled, the attention module will split the input tensor in slices, to compute attention in several steps. This is useful to save some memory in exchange for a small speed decrease. Args: slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If `"max"`, maxium amount of memory will be saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`. """ sliceable_head_dims = [] def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module): if hasattr(module, "set_attention_slice"): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_slicable_dims(child) # retrieve number of attention layers for module in self.children(): fn_recursive_retrieve_slicable_dims(module) num_slicable_layers = len(sliceable_head_dims) if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == "max": # make smallest slice possible slice_size = num_slicable_layers * [1] slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError( f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." ) for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f"size {size} has to be smaller or equal to {dim}.") # Recursively walk through all the children. # Any children which exposes the set_attention_slice method # gets the message def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, "set_attention_slice"): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def _set_gradient_checkpointing(self, module, value=False):
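In the constructor above, whether a block gets a motion module depends on a per-block resolution tag (2**i for down blocks, 2**(3 - i) for up blocks) appearing in motion_module_resolutions, with the decoder-only flag additionally gating the down path. A standalone sketch of that gating using the default-looking values from the record; the printout is illustrative only.

block_out_channels = (320, 640, 1280, 1280)
motion_module_resolutions = (1, 2, 4, 8)
use_motion_module = True
motion_module_decoder_only = False

for i in range(len(block_out_channels)):      # down blocks
    res = 2 ** i
    enabled = use_motion_module and (res in motion_module_resolutions) and not motion_module_decoder_only
    print(f"down block {i}: res={res}, motion module={enabled}")

for i in range(len(block_out_channels)):      # up blocks
    res = 2 ** (3 - i)
    enabled = use_motion_module and (res in motion_module_resolutions)
    print(f"up block {i}: res={res}, motion module={enabled}")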
if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):
0
2023-12-12 00:16:39+00:00
12k
Chat-3D/Chat-3D-v2
others/process_vil3dref_results.py
[ { "identifier": "Chat3D", "path": "models/chat3d.py", "snippet": "class Chat3D(nn.Module):\n \"\"\"\n VideoChat model.\n \"\"\"\n def __init__(self, config):\n super().__init__()\n llama_model_path = config.get(\"llama_model_path\")\n low_resource = config.get(\"low_resource\", False)\n # prompt\n self.prompt_template = config.get(\"prompt_template\", \"\")\n self.max_txt_len = config.get(\"max_txt_len\", 32)\n self.end_sym = config.get(\"end_sym\", '\\n')\n self.system_path = config.get(\"system_path\", \"\")\n self.begin_signal = \"###\"\n self.role = (\"Human\", \"Assistant\")\n self.pc_start_token, self.pc_end_token = \"<Target>\", \"</Target>\"\n self.scene_start_token, self.scene_end_token = \"<Scene>\", \"</Scene>\"\n self.add_scene_token = config.get(\"add_scene_token\", True)\n self.debug = config.get(\"debug\", False)\n self.obj_norm_scale = config.get(\"obj_norm_scale\", 1)\n self.scene_norm_scale = config.get(\"scene_norm_scale\", 1)\n self.grad_scale = config.get(\"grad_scale\", 1)\n\n mlp_dropout = config.get(\"mlp_dropout\", 0.5)\n self.stage = config.get(\"stage\", 1)\n\n self.low_resource = low_resource\n\n self.input_dim = config.get(\"input_dim\", 512)\n self.attr_dim = config.get(\"attr_dim\", 512)\n self.inter_dim = self.input_dim + self.attr_dim * 2\n\n if not self.debug:\n logger.info('Loading LLAMA')\n self.llama_tokenizer = LlamaTokenizer.from_pretrained(llama_model_path, use_fast=False)\n self.llama_tokenizer.pad_token = self.llama_tokenizer.eos_token\n if self.low_resource:\n self.llama_model = LlamaForCausalLM.from_pretrained(\n llama_model_path,\n torch_dtype=torch.float16,\n load_in_8bit=True,\n device_map=\"auto\"\n )\n else:\n self.llama_model = LlamaForCausalLM.from_pretrained(\n llama_model_path,\n torch_dtype=torch.float16,\n )\n logger.info(\"freeze LLAMA\")\n for name, param in self.llama_model.named_parameters():\n param.requires_grad = False\n # if self.stage != 1:\n # for layer_ind in range(30, 32):\n # for param in self.llama_model.model.layers[layer_ind].parameters():\n # param.requires_grad = True\n # param.data = param.data.float()\n self.llama_dim = self.llama_model.config.hidden_size\n logger.info('Loading LLAMA Done')\n else:\n self.llama_model = None\n self.llama_dim = 4096\n\n # self.object_input_proj = nn.Sequential(\n # nn.Linear(self.input_dim, self.input_dim),\n # # nn.ReLU(),\n # # nn.LayerNorm(self.input_dim),\n # )\n self.coord_proj = nn.Sequential(\n nn.Linear(3, self.attr_dim),\n # nn.ReLU(),\n # nn.LayerNorm(self.attr_dim),\n # nn.Dropout(mlp_dropout)\n )\n self.color_proj = nn.Sequential(\n nn.Linear(3, self.attr_dim),\n # nn.ReLU(),\n # nn.LayerNorm(self.attr_dim),\n # nn.Dropout(mlp_dropout)\n )\n # self.color_dropout = nn.Dropout(mlp_dropout)\n # self.pos_proj = nn.Sequential(\n # nn.Linear(6, self.inter_dim),\n # nn.LayerNorm(self.inter_dim)\n # )\n # self.pos_embedding = PositionalEmbedding(dim=self.llama_dim)\n self.pos_proj = nn.Sequential(\n nn.Linear(3, self.llama_dim)\n )\n self.object_proj = nn.Sequential(\n nn.Linear(self.inter_dim, self.llama_dim),\n nn.GELU(),\n nn.Dropout(mlp_dropout),\n nn.LayerNorm(self.llama_dim),\n nn.Linear(self.llama_dim, self.llama_dim)\n )\n self.scene_proj = nn.Sequential(\n nn.Linear(self.llama_dim, self.llama_dim),\n )\n self.encoder_num_layers = int(config.get(\"encoder_num_layers\", 1))\n self.relation_module = CMT(hidden_size=self.llama_dim, num_layers=self.encoder_num_layers)\n # self.cls_head = nn.Sequential(\n # nn.Linear(self.llama_dim, 40)\n # )\n\n if self.stage == 1:\n for p 
in self.relation_module.parameters():\n p.requires_grad = False\n for p in self.scene_proj.parameters():\n p.requires_grad = False\n for p in self.pos_proj.parameters():\n p.requires_grad = False\n # for p in self.pos_embedding.parameters():\n # p.requires_grad = False\n # for p in self.relation_module.parameters():\n # p.requires_grad = False\n # else:\n # for p in self.size_color_proj.parameters():\n # p.requires_grad = False\n # for p in self.scene_proj.parameters():\n # p.requires_grad = False\n # else:\n # for p in self.size_color_proj.parameters():\n # p.requires_grad = False\n # for p in self.scene_proj.parameters():\n # p.requires_grad = False\n\n with open(self.system_path, \"r\") as f:\n self.system = \"\\n\".join([x.strip() for x in f.readlines()])\n\n if not self.debug:\n self.object_norm = torch.norm(self.get_text_emb(\"object\"), p=2)\n self.relation_norm = torch.norm(self.get_text_emb(\"relation\"), p=2)\n self.position_norm = torch.norm(self.get_text_emb(\"position\"), p=2)\n if self.stage != 1:\n self.object_list_embed, self.object_list_ind = self.prepare_object_list()\n self.p_0_embed, self.p_1_embed = self.prepare_system_embed()\n\n # def process_prompt(self, prompt_path, prompt_template):\n # with open(prompt_path, 'r') as f:\n # prompt_candidates = f.read().splitlines()\n # with open(self.system_path, \"r\") as f:\n # system = \"\\n\".join([x.strip() for x in f.readlines()])\n # prompt_list = [system + \" \" + prompt_template.format(p) for p in prompt_candidates]\n # logger.info(f'Load {len(prompt_list)} training prompts')\n # logger.info(f'Prompt: {prompt_list}')\n # return prompt_list\n\n # def prompt_wrap(self, scene_embed, scene_mask, prompts, is_eval=False):\n # batch_size = scene_embed.shape[0]\n # for i, prompt in enumerate(prompts):\n # p_0, p_1 = prompt.split('<REPLACE>')\n # p_0_tokens = self.llama_tokenizer(p_0, return_tensors=\"pt\", add_special_tokens=is_eval).to(scene_embed.device)\n # p_1_tokens = self.llama_tokenizer(p_1, return_tensors=\"pt\", add_special_tokens=False).to(scene_embed.device)\n # # p_2_tokens = self.llama_tokenizer(p_2, return_tensors=\"pt\", add_special_tokens=False).to(pc_embed.device)\n # p_0_embeds = self.llama_model.model.embed_tokens(p_0_tokens.input_ids).expand(batch_size, -1, -1)\n # p_1_embeds = self.llama_model.model.embed_tokens(p_1_tokens.input_ids).expand(batch_size, -1, -1)\n # p_2_embeds = self.llama_model.model.embed_tokens(p_2_tokens.input_ids).expand(batch_size, -1, -1)\n # wrapped_embeds = torch.cat([p_0_embeds, pc_embed, p_1_embeds, scene_embed, p_2_embeds], dim=1)\n # wrapped_atts = scene_attn[:, :1].expand(-1, wrapped_embeds.shape[1])\n # return wrapped_embeds, wrapped_atts\n\n # def get_object_list_embed(self, scene_embed, scene_mask):\n # # scene_embed: (obj_num, dim)\n # embed_list = []\n # for i in range(scene_embed.shape[0]):\n # if scene_mask[i] == 0:\n # break\n # text = \"\"\n # if i > 0:\n # text += \", \"\n # text += f\"obj{i:02}: \"\n # text_embeds = self.get_text_emb(text, scene_embed.device).detach()\n # embed_list.extend([text_embeds.squeeze(0), scene_embed[i:i+1]])\n # return torch.cat(embed_list, dim=0)\n\n def prepare_object_list(self, max_obj_num=150):\n tmp_id = 0\n embed_list = []\n obj_index_list = []\n for i in range(max_obj_num):\n text = \"\" if i == 0 else \"; \"\n text += f\"obj{i:02} \"\n text_embeds = self.get_text_emb(text).squeeze(0)\n tmp_id += text_embeds.shape[0]\n obj_index_list.append(tmp_id)\n if self.add_scene_token:\n embed_list.extend([text_embeds, torch.zeros((2, 
text_embeds.shape[-1]))])\n tmp_id += 2\n else:\n embed_list.extend([text_embeds, torch.zeros((1, text_embeds.shape[-1]))])\n tmp_id += 1\n return torch.cat(embed_list, dim=0), obj_index_list\n\n def prepare_system_embed(self):\n prompt = self.system + \" \"\n p_0, p_1 = prompt.split(\"<REPLACE>\")\n p_0_token = self.llama_tokenizer(p_0, return_tensors=\"pt\", add_special_tokens=False)\n p_1_token = self.llama_tokenizer(p_1, return_tensors=\"pt\", add_special_tokens=False)\n p_0_embed = self.llama_model.model.embed_tokens(p_0_token.input_ids).squeeze(0)\n p_1_embed = self.llama_model.model.embed_tokens(p_1_token.input_ids).squeeze(0)\n return p_0_embed, p_1_embed\n\n def get_text_emb(self, text, device=\"cpu\"):\n text_tokens = self.llama_tokenizer(text, return_tensors=\"pt\", add_special_tokens=False).to(device)\n return self.llama_model.model.embed_tokens(text_tokens.input_ids)\n\n def encode_object_feat(self, feat, locs, colors):\n # feat = self.object_input_proj(feat)\n size_emb = self.coord_proj(locs[:, :, 3:6])\n gmm_weights = colors[..., :1]\n gmm_means = colors[..., 1:]\n gmm_colors = torch.sum(gmm_weights * gmm_means, dim=2)\n # color_emb = self.color_dropout(torch.sum(self.color_proj(gmm_means) * gmm_weights, dim=2))\n color_emb = self.color_proj(gmm_colors)\n feat = torch.cat([feat, size_emb, color_emb], dim=-1)\n # feat = torch.cat([feat, size_emb], dim=-1)\n # feat = self.scene_proj(feat)\n return feat\n\n @staticmethod\n def get_dist_attention(pos, dist_exp=1):\n # pos (bs, obj_num, 3)\n dist = pos.unsqueeze(1) - pos.unsqueeze(2)\n dist = torch.sum(dist.abs()**dist_exp, dim=-1)\n dist_attn = torch.nn.functional.softmax(-dist, dim=-1)\n return dist_attn\n\n def insert_object_embed(self, embed_1, embed_2, scene_mask, detach_mask=None):\n if detach_mask is not None:\n embed_1_detached = CustomGradLayer.apply(embed_1[detach_mask], self.grad_scale)\n embed_1[detach_mask] = embed_1_detached\n if embed_2 is not None:\n embed_2_detached = CustomGradLayer.apply(embed_2[detach_mask], self.grad_scale)\n embed_2[detach_mask] = embed_2_detached\n obj_num = int(scene_mask.sum())\n mx_ind = self.object_list_ind[obj_num - 1] + (2 if self.add_scene_token else 1)\n object_list_embed = self.object_list_embed[:mx_ind, :].to(embed_1.device)\n object_list_ind = torch.tensor(self.object_list_ind[:obj_num], dtype=torch.long)\\\n .to(embed_1.device)\n object_list_embed[object_list_ind] = embed_1[scene_mask.bool()].to(object_list_embed.dtype)\n if self.add_scene_token:\n object_list_embed[object_list_ind+1] = embed_2[scene_mask.bool()].to(object_list_embed.dtype)\n return object_list_embed\n\n def forward_stage1(self, scene_feat, scene_locs, scene_colors, target_captions, is_eval=False, **kwargs):\n object_embed = self.encode_object_feat(scene_feat, scene_locs, scene_colors)\n proj_object_embed = self.object_proj(object_embed)\n proj_object_embed = proj_object_embed.squeeze(1)\n # cls_output = self.cls_head(proj_object_embed)\n # cls_loss = F.cross_entropy(cls_output, target_clses)\n # cls_acc = (cls_output.max(dim=-1)[1] == target_clses).float().mean()\n norm_object_embed = torch.nn.functional.normalize(proj_object_embed, dim=-1) * self.obj_norm_scale\n target_embeds = []\n for target_caption in target_captions:\n target_tokens = self.llama_tokenizer(\n target_caption,\n return_tensors=\"pt\",\n padding=\"longest\",\n truncation=True,\n max_length=self.max_txt_len,\n add_special_tokens=False\n ).to(norm_object_embed.device)\n token_mask = target_tokens[\"attention_mask\"].unsqueeze(-1)\n target_embed 
= self.llama_model.model.embed_tokens(target_tokens.input_ids) # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n target_embed = (target_embed * token_mask).sum(1) / token_mask.sum(1)\n target_embed = target_embed.mean(dim=0)\n target_embeds.append(target_embed)\n target_embeds = torch.stack(target_embeds, dim=0).to(norm_object_embed.device)\n cosine_loss = F.cosine_embedding_loss(norm_object_embed, target_embeds.detach(), torch.tensor([1]).to(norm_object_embed.device))\n l2_loss = F.mse_loss(proj_object_embed, target_embeds.detach())\n # print(torch.norm(pc_embed[:1], p=2), torch.norm(target_embeds[:1], p=2))\n loss = cosine_loss\n return dict(\n loss=loss,\n cosine_loss=cosine_loss,\n # cls_loss=cls_loss,\n l2_loss=l2_loss,\n # cls_acc=cls_acc.detach().cpu(),\n cosine_score=1. - cosine_loss.detach().cpu(),\n obj_norm=proj_object_embed.norm(dim=-1).mean().detach().cpu(),\n target_norm=target_embeds.norm(dim=-1).mean().detach().cpu(),\n l2_dis=l2_loss.detach().cpu()\n )\n\n def forward_stage2(self, scene_feat, scene_locs, scene_colors, scene_mask, detach_mask, obj_ids, questions, answers, is_eval=False, **kwargs):\n object_embed = self.encode_object_feat(scene_feat, scene_locs, scene_colors)\n device = object_embed.device\n batch_size = object_embed.shape[0]\n proj_object_embed = self.object_proj(object_embed)\n norm_object_embed = torch.nn.functional.normalize(proj_object_embed, dim=-1) * self.obj_norm_scale\n # norm_object_embed = proj_object_embed\n proj_scene_embed = None\n if self.add_scene_token: # remember to change the evaluate !!!!!!!!!!!!!!!!!!!!!!(&*&^^#@$%##$%&(*^&%^$%@\n pos_embed = self.pos_proj(scene_locs[:, :, :3])\n scene_embed = proj_object_embed.detach() + 0.1 * pos_embed\n\n # scene_embed = scene_embed.mean(dim=1, keepdim=True).repeat(1, scene_embed.shape[1], 1)\n # proj_scene_embed = scene_embed - proj_object_embed\n\n scene_embed = self.relation_module(scene_embed, scene_locs, scene_mask.bool())\n proj_scene_embed = scene_embed - proj_object_embed.detach()\n\n norm_scene_embed = torch.nn.functional.normalize(proj_scene_embed, dim=-1) * self.scene_norm_scale\n # norm_scene_embed = proj_scene_embed\n input_embed_list, attn_list, target_list = [], [], []\n max_seq_len = 0\n for i, question in enumerate(questions):\n prompt = self.prompt_template.format(question)\n prompt_token = self.llama_tokenizer(prompt, return_tensors=\"pt\", add_special_tokens=False).to(device)\n prompt_embed = self.llama_model.model.embed_tokens(prompt_token.input_ids).detach().squeeze(0)\n # object_list_embed = self.get_object_list_embed(scene_embed[i], scene_mask[i])\n detach_mask = None\n object_list_embed = self.insert_object_embed(norm_object_embed[i], norm_scene_embed[i] if self.add_scene_token else None, scene_mask[i], detach_mask[i] if detach_mask is not None else None)\n # for j in range(obj_num):\n # start_ind = self.object_list_ind[j]\n # assert object_list_embed[start_ind].abs().sum() < 1e-6, (start_ind, object_list_embed[start_ind].sum())\n # assert object_list_embed[start_ind+1].abs().sum() < 1e-6, (start_ind+1, object_list_embed[start_ind+1].sum())\n # object_list_embed[start_ind:start_ind+1, :] = scene_embed[i][j]\n # object_list_embed[start_ind+1:start_ind+2, :] = pos_embed[i][j]\n\n p_0_embed = self.p_0_embed.to(device)\n p_1_embed = self.p_1_embed.to(device)\n\n wrapped_embed = torch.cat([p_0_embed, object_list_embed, p_1_embed, prompt_embed], dim=0)\n wrapped_attn = torch.ones(wrapped_embed.size()[:-1], dtype=torch.long).to(wrapped_embed.device)\n empty_target = (\n 
torch.ones([wrapped_attn.shape[0]+1], dtype=torch.long).to(device).fill_(-100)\n )\n\n answer = answers[i] + self.end_sym\n to_regress_token = self.llama_tokenizer(answer, return_tensors=\"pt\").to(device)\n # breakpoint()\n answer_target = to_regress_token.input_ids.masked_fill(\n to_regress_token.input_ids == self.llama_tokenizer.pad_token_id, -100\n ).squeeze(0)\n to_regress_embed = self.llama_model.model.embed_tokens(to_regress_token.input_ids).squeeze(0)\n\n target = torch.cat([empty_target, answer_target], dim=0)\n bos = torch.ones([1], dtype=to_regress_token.input_ids.dtype, device=to_regress_token.input_ids.device) * self.llama_tokenizer.bos_token_id\n bos_embed = self.llama_model.model.embed_tokens(bos)\n bos_attn = wrapped_attn[:1]\n input_embed = torch.cat([bos_embed, wrapped_embed, to_regress_embed], dim=0)\n attn = torch.cat([bos_attn, wrapped_attn, to_regress_token.attention_mask[0]], dim=0)\n input_embed_list.append(input_embed)\n attn_list.append(attn)\n target_list.append(target)\n max_seq_len = max(max_seq_len, target.shape[0])\n\n dim = norm_object_embed.shape[2]\n\n input_embeds = torch.zeros([batch_size, max_seq_len, dim], dtype=input_embed_list[0].dtype).to(device)\n attention_mask = torch.zeros([batch_size, max_seq_len], dtype=attn_list[0].dtype).to(device)\n targets = torch.zeros([batch_size, max_seq_len], dtype=target_list[0].dtype).to(device).fill_(-100)\n for i in range(len(input_embed_list)):\n input_embed = input_embed_list[i]\n attn = attn_list[i]\n target = target_list[i]\n input_embeds[i, :input_embed.shape[0], :] = input_embed\n attention_mask[i, :attn.shape[0]] = attn\n targets[i, :target.shape[0]] = target\n\n with self.maybe_autocast():\n outputs = self.llama_model(\n inputs_embeds=input_embeds,\n attention_mask=attention_mask,\n return_dict=True,\n labels=targets,\n )\n\n return dict(\n loss=outputs.loss,\n obj_norm=proj_object_embed.norm(dim=-1).mean().detach().cpu(),\n scene_norm=proj_scene_embed.norm(dim=-1).mean().detach().cpu() if proj_scene_embed is not None else 0.\n )\n\n def forward_stage3(self, scene_feat, scene_attr, scene_mask, target_id, conversations, is_eval=False, **kwargs):\n batch_size, obj_num, _ = scene_feat.shape\n scene_feat = self.encode_and_project(scene_feat, scene_attr)\n pc_embed = torch.gather(scene_feat, 1, target_id.unsqueeze(1).unsqueeze(2).expand(-1, -1, scene_feat.shape[-1]))\n if self.encoder_num_layers > 0:\n scene_feat = self.relation_module(scene_feat, mask=(~scene_mask.bool()).unsqueeze(1).expand(-1, obj_num, -1).unsqueeze(1))\n\n scene_embed = scene_feat * scene_mask.unsqueeze(-1)\n # scene_attn = torch.ones(scene_embed.size()[:-1], dtype=torch.long).to(scene_embed.device)\n max_len = 0\n input_embed_list = []\n p_0_len_list, p_1_len_list = [], []\n target_list = []\n for idx, prompt in enumerate(conversations):\n tmp_scene_embed = scene_embed[idx:idx+1]\n tmp_pc_embed = pc_embed[idx:idx+1]\n p_0, p_ = prompt.split(\"<TargetHere>\")\n p_1, p_2 = p_.split(\"<SceneHere>\")\n p_1 = self.pc_end_token + p_1\n p_0_tokens = self.llama_tokenizer(p_0, return_tensors=\"pt\", add_special_tokens=is_eval).to(tmp_pc_embed.device)\n p_1_tokens = self.llama_tokenizer(p_1, return_tensors=\"pt\", add_special_tokens=False).to(tmp_pc_embed.device)\n p_2_tokens = self.llama_tokenizer(p_2, return_tensors=\"pt\", add_special_tokens=False).to(tmp_pc_embed.device)\n p_0_embeds = self.llama_model.model.embed_tokens(p_0_tokens.input_ids)\n p_1_embeds = self.llama_model.model.embed_tokens(p_1_tokens.input_ids)\n p_2_embeds = 
self.llama_model.model.embed_tokens(p_2_tokens.input_ids)\n input_embeds = torch.cat([p_0_embeds, tmp_pc_embed, p_1_embeds, tmp_scene_embed, p_2_embeds], dim=1)\n\n sep1 = self.begin_signal + self.role[0] + \":\"\n sep2 = self.begin_signal + self.role[1] + \":\"\n raw_text = p_2.split(sep2)\n for _idx in range(1, len(raw_text)):\n raw_text[_idx] = sep2 + raw_text[_idx]\n answer_targets = p_2_tokens.input_ids.clone()\n system = raw_text[0].split(sep1)[0]\n system_len = self._get_text_len(system.rstrip())\n sep_len = self._get_text_len(sep1.rstrip())\n cur_len = self._get_text_len(raw_text[0].rstrip())\n answer_targets[:, :system_len] = -100\n answer_targets[:, (system_len+sep_len):cur_len] = -100\n for text in raw_text[1:-1]:\n total_len = self._get_text_len(text.rstrip())\n ans_len = self._get_text_len((text.split(sep1)[0]+sep1).rstrip())\n answer_targets[:, (cur_len+ans_len):(cur_len+total_len)] = -100\n cur_len += total_len\n cur_len += self._get_text_len(raw_text[-1].rstrip())\n if cur_len != answer_targets.shape[1]:\n print(f\"The final length is not equal to the original prompt: {prompt}\")\n assert cur_len == answer_targets.shape[1], (cur_len, answer_targets.shape[1])\n\n max_len = max(max_len, input_embeds.shape[1])\n input_embed_list.append(input_embeds)\n p_0_len_list.append(p_0_tokens.input_ids.shape[1])\n p_1_len_list.append(p_1_tokens.input_ids.shape[1])\n target_list.append(answer_targets)\n\n txt_len = min(max_len + 1, self.max_txt_len + obj_num + 1)\n inputs_embeds = torch.ones([batch_size, txt_len], dtype=torch.long).to(pc_embed.device) * self.llama_tokenizer.pad_token_id\n inputs_embeds = self.llama_model.model.embed_tokens(inputs_embeds)\n attention_mask = torch.zeros([batch_size, txt_len], dtype=torch.long).to(pc_embed.device)\n targets = torch.ones([batch_size, txt_len], dtype=torch.long).to(pc_embed.device).fill_(-100)\n inputs_embeds[:, :1] = self.llama_tokenizer.bos_token_id\n for idx in range(batch_size):\n input_len = min(input_embed_list[idx].shape[1], txt_len - 1)\n inputs_embeds[idx, 1:(input_len+1)] = input_embed_list[idx][:, :input_len]\n attention_mask[idx, :(input_len+1)] = 1\n p_0_len = p_0_len_list[idx]\n p_1_len = p_1_len_list[idx]\n targets[idx, (p_0_len+p_1_len+obj_num+2):(input_len+1)] = target_list[idx][0, :(input_len-p_0_len-p_1_len-obj_num-1)]\n\n outputs = self.llama_model(\n inputs_embeds=inputs_embeds,\n attention_mask=attention_mask,\n return_dict=True,\n labels=targets\n )\n\n return dict(\n loss=outputs.loss\n )\n\n def evaluate(self, scene_feat, scene_locs, scene_colors, scene_mask, custom_prompt, is_eval=True, **kwargs):\n object_embed = self.encode_object_feat(scene_feat, scene_locs, scene_colors)\n device = object_embed.device\n batch_size, obj_num = object_embed.shape[:2]\n proj_object_embed = self.object_proj(object_embed)\n norm_object_embed = torch.nn.functional.normalize(proj_object_embed, dim=-1) * self.obj_norm_scale\n # norm_object_embed = proj_object_embed\n if self.add_scene_token:\n pos_embed = self.pos_proj(scene_locs[:, :, :3])\n scene_embed = proj_object_embed + 0.1 * pos_embed\n\n # scene_embed = scene_embed.mean(dim=1, keepdim=True).repeat(1, scene_embed.shape[1], 1)\n # proj_scene_embed = scene_embed - proj_object_embed\n\n scene_embed = self.relation_module(scene_embed, scene_locs, scene_mask.bool())\n proj_scene_embed = scene_embed - proj_object_embed\n\n norm_scene_embed = torch.nn.functional.normalize(proj_scene_embed, dim=-1) * self.scene_norm_scale\n # norm_scene_embed = proj_scene_embed\n\n output_texts = []\n for i 
in range(batch_size):\n # tmp_scene_embed, _ = self.prompt_wrap(pc_embed[idx:idx+1], scene_embed[idx:idx+1], scene_attn[idx:idx+1], custom_prompt[idx], is_eval)\n p_0, p_1 = custom_prompt[i].split(\"<REPLACE>\")\n p_0_token = self.llama_tokenizer(p_0, return_tensors=\"pt\", add_special_tokens=is_eval).to(device)\n p_1_token = self.llama_tokenizer(p_1, return_tensors=\"pt\", add_special_tokens=False).to(device)\n p_0_embed = self.llama_model.model.embed_tokens(p_0_token.input_ids)\n p_1_embed = self.llama_model.model.embed_tokens(p_1_token.input_ids)\n\n object_list_embed = self.insert_object_embed(norm_object_embed[i], norm_scene_embed[i] if self.add_scene_token else None, scene_mask[i])\n\n # for j in range(obj_num):\n # start_ind = self.object_list_ind[j]\n # object_list_embed[start_ind:start_ind + 1, :] = scene_embed[i][j]\n # object_list_embed[start_ind + 1:start_ind + 2, :] = pos_embed[i][j]\n object_list_embed = object_list_embed.unsqueeze(0)\n wrapped_embed = torch.cat([p_0_embed, object_list_embed, p_1_embed], dim=1)\n stop_words_ids = [torch.tensor([835]).to(wrapped_embed.device),\n torch.tensor([2277, 29937]).to(wrapped_embed.device)]\n stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops=stop_words_ids)])\n with self.maybe_autocast():\n outputs = self.llama_model.generate(\n inputs_embeds=wrapped_embed,\n max_new_tokens=min(self.max_txt_len * 2, 512),\n stopping_criteria=stopping_criteria,\n num_beams=1,\n do_sample=True,\n min_length=1,\n top_p=0.9,\n repetition_penalty=1.0,\n length_penalty=1,\n temperature=1.0,\n )\n output_token = outputs[0]\n if output_token[0] == 0: # the model might output an unknown token <unk> at the beginning. remove it\n output_token = output_token[1:]\n if output_token[0] == 1: # some users find that there is a start token <s> at the beginning. 
remove it\n output_token = output_token[1:]\n output_text = self.llama_tokenizer.decode(output_token, add_special_tokens=False)\n output_text = output_text.split(self.end_sym)[0]\n output_texts.append(output_text)\n\n return output_texts\n\n def forward(self, **kwargs):\n if \"target_captions\" in kwargs:\n return self.forward_stage1(**kwargs)\n if \"answers\" in kwargs:\n return self.forward_stage2(**kwargs)\n if \"conversations\" in kwargs:\n return self.forward_stage3(**kwargs)\n if \"custom_prompt\" in kwargs:\n return self.evaluate(**kwargs)\n return None\n\n def _get_text_len(self, text):\n return self.llama_tokenizer(text, return_tensors=\"pt\", add_special_tokens=False).input_ids.shape[1]\n\n def maybe_autocast(self, dtype=torch.float16):\n # if on cpu, don't use autocast\n # if on gpu, use autocast with dtype if provided, otherwise use torch.float16\n enable_autocast = self.device != torch.device(\"cpu\")\n\n if enable_autocast:\n return torch.cuda.amp.autocast(dtype=dtype)\n else:\n return contextlib.nullcontext()\n\n @property\n def device(self):\n return list(self.parameters())[0].device" }, { "identifier": "setup_main", "path": "utils/config_utils.py", "snippet": "def setup_main():\n \"\"\"\n Setup config, logger, output_dir, etc.\n Shared for pretrain and all downstream tasks.\n \"\"\"\n config = setup_config()\n if hasattr(config, \"evaluate\") and config.evaluate:\n config = setup_evaluate_config(config)\n init_distributed_mode(config)\n\n if is_main_process():\n setup_output_dir(config.output_dir, excludes=[\"code\"])\n setup_logger(output=config.output_dir, color=True, name=\"vindlu\")\n logger.info(f\"config: {Config.pretty_text(config)}\")\n Config.dump(config, os.path.join(config.output_dir, \"config.json\"))\n return config" }, { "identifier": "setup_model", "path": "tasks/shared_utils.py", "snippet": "def setup_model(\n config, model_cls, find_unused_parameters=False\n):\n logger.info(\"Creating model\")\n config = copy.deepcopy(config)\n\n model = model_cls(config=config.model)\n\n model = model.to(torch.device(config.device))\n model_without_ddp = model\n if config.distributed:\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[config.gpu],\n find_unused_parameters=find_unused_parameters, # `False` for image-only task\n )\n\n optimizer = create_optimizer(config.optimizer, model)\n scheduler = create_scheduler(config.scheduler, optimizer)\n scaler = torch.cuda.amp.GradScaler(enabled=config.fp16, growth_interval=100)\n\n start_epoch = 0\n global_step = 0\n\n # auto resume the latest checkpoint\n if config.get(\"auto_resume\", False):\n logger.info(\"Auto resuming\")\n model_latest = join(config.output_dir, \"ckpt_latest.pth\")\n model_best = join(config.output_dir, \"ckpt_best.pth\")\n large_num = -1\n for p in os.listdir(config.output_dir):\n if 'ckpt' in p:\n num = p.split('_')[1].split('.')[0]\n if str.isnumeric(num):\n if int(num) > large_num:\n large_num = int(num)\n if large_num != -1:\n model_latest = join(config.output_dir, f\"ckpt_{large_num:02d}.pth\")\n if osp.isfile(model_latest) and not config.pretrained_path:\n config.pretrained_path = model_latest\n config.resume = True\n elif osp.isfile(model_best) and not config.pretrained_path:\n config.pretrained_path = model_best\n config.resume = True\n else:\n logger.info(f\"Not found checkpoint in {config.output_dir}\")\n\n if osp.isfile(config.pretrained_path):\n checkpoint = torch.load(config.pretrained_path, map_location=\"cpu\")\n state_dict = checkpoint[\"model\"]\n\n if 
config.resume:\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\n scheduler.load_state_dict(checkpoint[\"scheduler\"])\n scaler.load_state_dict(checkpoint[\"scaler\"])\n start_epoch = checkpoint[\"epoch\"] + 1\n global_step = checkpoint[\"global_step\"]\n\n # for k in list(state_dict.keys()):\n # if \"relation_module\" in k:\n # del state_dict[k]\n\n msg = model_without_ddp.load_state_dict(state_dict, strict=False)\n # object_proj_dict = {}\n # for k in state_dict.keys():\n # if \"object_proj\" in k:\n # object_proj_dict[k.split(\"object_proj.\")[1]] = state_dict[k]\n # model_without_ddp.scene_proj.load_state_dict(object_proj_dict, strict=False)\n logger.info(msg)\n logger.info(f\"Loaded checkpoint from {config.pretrained_path}\")\n else:\n logger.warning(\"No pretrained checkpoint provided, training from scratch\")\n\n return (\n model,\n model_without_ddp,\n optimizer,\n scheduler,\n scaler,\n start_epoch,\n global_step,\n )" }, { "identifier": "setup_seed", "path": "utils/basic_utils.py", "snippet": "def setup_seed(seed):\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)" }, { "identifier": "get_rank", "path": "utils/distributed.py", "snippet": "def get_rank():\n if not is_dist_avail_and_initialized():\n return 0\n return dist.get_rank()" }, { "identifier": "process_batch_data", "path": "dataset/base_dataset.py", "snippet": "def process_batch_data(scene_feats, scene_locs, scene_colors):\n max_obj_num = max([e.shape[0] for e in scene_feats])\n # max_obj_num = 110\n batch_size = len(scene_feats)\n batch_scene_feat = torch.zeros(batch_size, max_obj_num, scene_feats[0].shape[-1])\n batch_scene_locs = torch.zeros(batch_size, max_obj_num, scene_locs[0].shape[-1])\n batch_scene_colors = torch.zeros(batch_size, max_obj_num, scene_colors[0].shape[-2], scene_colors[0].shape[-1])\n batch_scene_mask = torch.zeros(batch_size, max_obj_num, dtype=torch.long)\n for i in range(batch_size):\n batch_scene_feat[i][:scene_feats[i].shape[0]] = scene_feats[i]\n batch_scene_locs[i][:scene_locs[i].shape[0]] = scene_locs[i]\n batch_scene_colors[i][:scene_colors[i].shape[0]] = scene_colors[i]\n batch_scene_mask[i][:scene_feats[i].shape[0]] = 1\n return batch_scene_feat, batch_scene_locs, batch_scene_colors, batch_scene_mask" } ]
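The process_batch_data helper quoted at the end of the context list above pads variable-length per-scene tensors into fixed-size batches and builds the matching mask. A minimal usage sketch under that reading; the module path follows the context entry dataset/base_dataset.py and the tensor sizes are made up for illustration:

import torch
from dataset.base_dataset import process_batch_data  # path taken from the context entry above

# two scenes with 3 and 5 objects respectively (feature/location/color dims are illustrative)
scene_feats  = [torch.randn(3, 512), torch.randn(5, 512)]
scene_locs   = [torch.randn(3, 6),   torch.randn(5, 6)]
scene_colors = [torch.randn(3, 4, 4), torch.randn(5, 4, 4)]

feat, locs, colors, mask = process_batch_data(scene_feats, scene_locs, scene_colors)
# feat: (2, 5, 512), locs: (2, 5, 6), colors: (2, 5, 4, 4); mask: (2, 5) long tensor,
# 1 where a real object exists and 0 where the scene was padded to the batch maximum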
import json import jsonlines import math import torch import sys import torch from models.chat3d import Chat3D from utils.config_utils import setup_main from tasks.shared_utils import setup_model from utils.basic_utils import setup_seed from utils.distributed import get_rank from dataset.base_dataset import process_batch_data from tqdm import tqdm
9,435
""" loss/og3d: 2.9594, loss/obj3d_clf: 3.3753, loss/obj3d_clf_pre: 2.0714, loss/txt_clf: 0.6708, loss/total: 10.2789, loss/cross_attn_0: 0.0032, loss/cross_attn_1: 0.0011, loss/cross_attn_2: 0.0011, loss/cross_attn_3: 0.0012, loss/self_attn_0: 0.1595, loss/self_attn_1: 0.0425, loss/self_attn_2: 0.0541, loss/self_attn_3: 0.1030, loss/hidden_state_0: 0.3919, loss/hidden_state_1: 0.0765, loss/hidden_state_2: 0.1033, loss/hidden_state_3: 0.1308, loss/hidden_state_4: 0.1337, acc/og3d: 0.6373, acc/og3d_class: 0.8903, acc/obj3d_clf: 0.6828, acc/obj3d_clf_pre: 0.6131, acc/txt_clf: 0.9281 """ val_file = "/root/scene-LLaMA/datasets/exprs_neurips22/gtlabelpcd_mix/nr3d/preds/val_outs.json" nr3d_anno_file = "/root/scene-LLaMA/datasets/referit3d/annotations/bert_tokenized/nr3d.jsonl" anno_root = "annotations" # annotation dir attribute_file = f"{anno_root}/scannet_attributes_old.json" attributes = json.load(open(attribute_file, 'r')) val_results = json.load(open(val_file)) nr3d_anno = {} with jsonlines.open(nr3d_anno_file, "r") as reader: for l in reader: nr3d_anno[l["item_id"]] = l item_list = [] acc = 0 for k, v in val_results.items(): obj_ids = v["obj_ids"] obj_logits = v["obj_logits"] obj_logits = (torch.tensor(obj_logits)).softmax(dim=-1).tolist() scan_id = nr3d_anno[k]["scan_id"] utter = nr3d_anno[k]["utterance"] target_id = nr3d_anno[k]["target_id"] obj_num = len(attributes[scan_id]["locs"]) assert target_id < obj_num, f"{obj_num}, {target_id}, {scan_id}" logit_ids = zip(obj_logits, obj_ids) logit_ids = sorted(logit_ids, reverse=True) logits, ids = zip(*logit_ids) # logits = (torch.tensor(logits[:5]) / 5.).softmax(dim=-1).tolist() print(logits) if ids[0] == target_id: acc += 1 item_list.append({ "can_ids": ids[:5], "can_preds": logits[:5], "utter": utter, "target_id": target_id, "scan_id": scan_id }) # print(target_id) # print(ids[:5]) # print(logits[:5]) # exit() print("Acc:", float(acc) / len(item_list)) # print(item_list[:5]) # exit() sys.path.append(".")
""" loss/og3d: 2.9594, loss/obj3d_clf: 3.3753, loss/obj3d_clf_pre: 2.0714, loss/txt_clf: 0.6708, loss/total: 10.2789, loss/cross_attn_0: 0.0032, loss/cross_attn_1: 0.0011, loss/cross_attn_2: 0.0011, loss/cross_attn_3: 0.0012, loss/self_attn_0: 0.1595, loss/self_attn_1: 0.0425, loss/self_attn_2: 0.0541, loss/self_attn_3: 0.1030, loss/hidden_state_0: 0.3919, loss/hidden_state_1: 0.0765, loss/hidden_state_2: 0.1033, loss/hidden_state_3: 0.1308, loss/hidden_state_4: 0.1337, acc/og3d: 0.6373, acc/og3d_class: 0.8903, acc/obj3d_clf: 0.6828, acc/obj3d_clf_pre: 0.6131, acc/txt_clf: 0.9281 """ val_file = "/root/scene-LLaMA/datasets/exprs_neurips22/gtlabelpcd_mix/nr3d/preds/val_outs.json" nr3d_anno_file = "/root/scene-LLaMA/datasets/referit3d/annotations/bert_tokenized/nr3d.jsonl" anno_root = "annotations" # annotation dir attribute_file = f"{anno_root}/scannet_attributes_old.json" attributes = json.load(open(attribute_file, 'r')) val_results = json.load(open(val_file)) nr3d_anno = {} with jsonlines.open(nr3d_anno_file, "r") as reader: for l in reader: nr3d_anno[l["item_id"]] = l item_list = [] acc = 0 for k, v in val_results.items(): obj_ids = v["obj_ids"] obj_logits = v["obj_logits"] obj_logits = (torch.tensor(obj_logits)).softmax(dim=-1).tolist() scan_id = nr3d_anno[k]["scan_id"] utter = nr3d_anno[k]["utterance"] target_id = nr3d_anno[k]["target_id"] obj_num = len(attributes[scan_id]["locs"]) assert target_id < obj_num, f"{obj_num}, {target_id}, {scan_id}" logit_ids = zip(obj_logits, obj_ids) logit_ids = sorted(logit_ids, reverse=True) logits, ids = zip(*logit_ids) # logits = (torch.tensor(logits[:5]) / 5.).softmax(dim=-1).tolist() print(logits) if ids[0] == target_id: acc += 1 item_list.append({ "can_ids": ids[:5], "can_preds": logits[:5], "utter": utter, "target_id": target_id, "scan_id": scan_id }) # print(target_id) # print(ids[:5]) # print(logits[:5]) # exit() print("Acc:", float(acc) / len(item_list)) # print(item_list[:5]) # exit() sys.path.append(".")
config = setup_main()
1
2023-12-11 14:39:58+00:00
12k
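The trailing fields above close the row: the gold next line, the gold snippet index, a timestamp, and the level value. A hedged sketch of how such a row could be assembled into a next-line-prediction example; the interpretation that gold_snippet_index selects the relevant context snippet and that next_line is the completion target is inferred from the column names, not documented in the dump itself:

def build_example(row: dict) -> tuple[str, str]:
    # assumed: the gold snippet is the context entry the completion actually depends on
    gold = row["context"][row["gold_snippet_index"]]
    prompt = "\n\n".join([
        f"# {gold['path']}\n{gold['snippet']}",  # retrieved cross-file context
        row["import_statement"],                  # imports of the file being completed
        row["cropped_code"],                      # code preceding the held-out line
    ])
    return prompt, row["next_line"]

def exact_match(prediction: str, target: str) -> bool:
    # a simple scoring rule; real evaluations may normalize whitespace differently
    return prediction.strip() == target.strip()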
SqueezeBits/owlite
owlite/backend/onnx/transforms.py
[ { "identifier": "log", "path": "owlite/logger.py", "snippet": "class Logger(logging.Logger):\n class _WarningFilterContext:\n class WarningFilter(logging.Filter):\n ENV_VAR = \"OWLITE_LOG_LEVEL\"\n DEBUG_WARNING = 15\n ULTRA_VERBOSE = -10\n def ignore_warnings(self):\n def __init__(self, logger) -> None:\n def __enter__(self):\n def filter(self, record):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def debug_warning(self, msg, *args, **kwargs):\n def level(self) -> int:\n def level(self, value):\ndef suppress_owlite_warnings(cls):\n def new_init(self, *args, **kwargs):" }, { "identifier": "is_floating_point", "path": "owlite/backend/utils.py", "snippet": "def is_floating_point(dtype: Optional[Union[np.dtype, \"TensorProto.DataType\"]]) -> bool:\n \"\"\"Checks if the dtype is a floating point type\n\n Args:\n dtype (Optional[Union[np.dtype, TensorProto.DataType]]): a dtype\n\n Returns:\n bool: True if the dtype is a floating point type, False otherwise.\n \"\"\"\n dtype = get_numpy_type(dtype)\n if dtype is None:\n return False\n return np.issubdtype(dtype, np.floating)" }, { "identifier": "nodestr", "path": "owlite/backend/utils.py", "snippet": "def nodestr(node: Optional[AnyNode], show_activations: bool = False) -> str:\n \"\"\"Generates the string representation of a node instance\n\n Args:\n node (Optional[AnyNode]): a node. Must be an instance of one of the types:\n torch.fx.Node, onnx.NodeProto or gs.Node\n show_activations (bool, optional): Only available if node is either onnx.NodeProto or gs.Node instance. If True,\n the string representation contains the information about the node's input and output activations.\n Defaults to False.\n\n Returns:\n str: the string representation of the node\n \"\"\"\n if node is None:\n return \"<node-not-found>\"\n if isinstance(node, ONNXNode):\n s = f\"{node.name} ({node.op_type})\"\n if show_activations:\n a = json.dumps(\n {\"inputs\": list(node.input), \"outputs\": list(node.output)},\n indent=2,\n sort_keys=True,\n )\n s = f\"{s}: {a}\"\n return s\n if isinstance(node, FXNode):\n if (\n node.op == \"call_module\"\n and isinstance(node.target, str)\n and isinstance(node.graph.owning_module, GraphModule)\n ):\n target = node.graph.owning_module.get_submodule(node.target)\n else:\n target = node.target\n s = f\"{node.name}: {node.op}({targetstr(target)})\"\n if show_activations:\n a = json.dumps(\n {\n \"args\": f\"{node.args}\",\n \"kwargs\": f\"{node.kwargs}\",\n \"inputs\": [*map(nodestr, node.all_input_nodes)],\n \"outputs\": [*map(nodestr, node.users)],\n },\n indent=2,\n sort_keys=True,\n )\n s = f\"{s}: {a}\"\n return s\n if isinstance(node, gs.Node):\n s = f\"{node.name} ({node.op})\"\n if show_activations:\n a = json.dumps(\n {\n \"inputs\": [*(t.name for t in node.inputs)],\n \"outputs\": [*(t.name for t in node.outputs)],\n },\n indent=2,\n sort_keys=True,\n )\n s = f\"{s}: {a}\"\n return s\n return \"<not-a-node>\"" }, { "identifier": "fold_constants", "path": "owlite/backend/onnx/fold_constants.py", "snippet": "def fold_constants(\n graph: Graph,\n fold_shapes=True,\n recurse_subgraphs=True,\n partitioning=None,\n error_ok=True,\n flatten_subgraphs=True,\n size_threshold=None,\n should_exclude_node=None,\n) -> Graph:\n \"\"\"\n Folds constants in-place in the graph. The graph must be topologically sorted prior to\n calling this function (see `toposort()`).\n\n This function will not remove constants after folding them. 
In order to get rid of\n these hanging nodes, you can run the `cleanup()` function.\n\n *Note: Due to how this function is implemented, the graph must be exportable to ONNX,\n and evaluable in ONNX-Runtime. Additionally, ONNX-Runtime must be installed.*\n\n Args:\n fold_shapes (bool):\n Whether to fold `Shape` nodes in the graph.\n This requires shapes to be inferred in the graph, and can only fold\n static shapes.\n Defaults to True.\n recurse_subgraphs (bool):\n Whether to recursively fold constants in subgraphs.\n Defaults to True.\n partitioning (Union[str, None]):\n Whether/How to partition the graph so that errors in folding one\n part of a model do not affect other parts. Available modes are:\n\n - None: Do not partition the graph. If inference fails, no constants are folded.\n - \"basic\": Partition the graph. If inference fails in one partition, other partitions will\n remain unaffected.\n - \"recursive\": Parition the graph recursively. If inference fails in a partition, the partition\n will be further paritioned.\n\n Defaults to None.\n error_ok (bool):\n Whether inference errors should be suppressed.\n When this is False, any errors encountered during inference will be re-raised.\n Defaults to True.\n flatten_subgraphs (bool):\n Whether to flatten subgraphs where possible. For example, `If` nodes with a constant condition\n can be flattened into the parent graph.\n size_threshold (int):\n The maximum size threshold, in bytes, for which to fold constants.\n Any tensors larger than this value will not be folded.\n Set to ``None`` to disable the size threshold and always fold constants.\n For example, some models may apply ops like `Tile` or `Expand` to constants, which can\n result in very large tensors. Rather than pre-computing those constants and bloating\n the model size, it may be desirable to skip folding them and allow them to be computed\n at runtime.\n Defaults to None.\n should_exclude_node (Callable[[gs.Node], bool]):\n A callable that accepts an onnx-graphsurgeon node from the graph and reports whether it should\n be excluded from folding. This is only called for nodes which are otherwise foldable.\n Note that preventing a node from being folded also prevents its consumers from being folded.\n Defaults to a callable that always returns False.\n\n Returns:\n graph\n \"\"\"\n from onnx_graphsurgeon.exporters.onnx_exporter import dtype_to_onnx, export_onnx\n\n should_exclude_node = misc.default_value(should_exclude_node, lambda node: False)\n\n PARTITIONING_MODES = [None, \"basic\", \"recursive\"]\n if partitioning not in PARTITIONING_MODES:\n G_LOGGER.critical(\n f\"Argument for parameter 'partitioning' must be one of: {PARTITIONING_MODES}\"\n )\n ORT_PROVIDERS = [\"CPUExecutionProvider\"]\n\n G_LOGGER.debug(f\"Folding constants in {graph.name}\")\n\n # We apply constant folding in 5 passes:\n # Pass 1 lowers 'Constant' nodes into Constant tensors.\n # Pass 2 elides casts applied to shape tensors. This is done separately from other shape folding\n # since it operates on the original graph rather than a clone.\n # Pass 3 finds all Constant tensors in the graph, then finds all descendants which are dependent\n # only on constants.\n # Pass 4 searches for Shape nodes that have variable inputs (i.e. 
not marked const in pass 1)\n # and turns them into Constants iff the input has a statically known shape.\n # Pass 5 computes the descendants determined in Pass 3 using ONNX-Runtime and replaces them in the graph.\n\n # Pass 1: Lower constant nodes\n for tensor in graph.tensors().values():\n if len(tensor.inputs) == 1:\n node = tensor.inputs[0]\n if node.op == \"Constant\":\n tensor.to_constant(\n node.attrs[\"value\"]._values\n ) # Using ._values avoids copying\n tensor.inputs.clear()\n\n # Pass 2: Run shape-tensor cast elision\n def run_cast_elision(node):\n import onnx\n\n # Search for Cast(s) (from int -> float) -> intermediate operator (with float constants) -> Cast(s) (back to int)\n # This pattern is problematic for TensorRT since these operations may be performed on Shape Tensors, which\n # are not allowed to be floating point type. Attempt to fold the pattern here\n VALID_CAST_ELISION_OPS = [\n \"Add\",\n \"Sub\",\n \"Mul\",\n \"Div\",\n \"Max\",\n \"Min\",\n \"Equal\",\n \"Greater\",\n \"Less\",\n \"Concat\",\n ]\n\n if node.op not in VALID_CAST_ELISION_OPS:\n return\n\n # If the uncasted outputs of this node have any consumers other than \"Cast\" nodes,\n # then we cannot elide the cast.\n for out_tensor in node.outputs:\n if out_tensor in graph.outputs:\n return\n\n if any(out_node.op != \"Cast\" for out_node in out_tensor.outputs):\n return\n\n # Get list of input nodes that cast to float32\n inp_casts = [\n inp_node\n for inp_tensor in node.inputs\n for inp_node in inp_tensor.inputs\n if inp_node.op == \"Cast\"\n and inp_node.attrs[\"to\"] == onnx.TensorProto.DataType.FLOAT\n ]\n\n # [SQZB] Ensure that Cast nodes are attached to all of the input nodes.\n # Otherwise, onnx simplifier's shape inference could fail. (e.g. the test case \"torch_f\")\n if len(inp_casts) < len(node.inputs):\n return\n\n # No cast nodes found, return early\n if not inp_casts:\n return\n\n # Ensure that all input cast nodes are casting from the same type\n inp_dtypes = [dtype_to_onnx(inp_cast.inputs[0].dtype) for inp_cast in inp_casts]\n if len(set(inp_dtypes)) != 1:\n return\n\n final_type = inp_dtypes[0]\n\n # Get list of output nodes that cast to int32 or int64\n out_casts = [\n out_node\n for out_tensor in node.outputs\n for out_node in out_tensor.outputs\n if out_node.op == \"Cast\"\n and out_node.attrs[\"to\"]\n in [onnx.TensorProto.DataType.INT32, onnx.TensorProto.DataType.INT64]\n ]\n\n # No cast node found on outputs, return early\n if not out_casts:\n return\n\n # Ensure that all output cast nodes are casting to the same type and that this\n # matches the original type before the inputs were casted.\n out_dtypes = [out_cast.attrs[\"to\"] for out_cast in out_casts]\n if len(set(out_dtypes)) != 1 or out_dtypes[0] != final_type:\n return\n\n # If all checks passed, reconnect inputs/outputs to the consumers/producers\n # of the Cast nodes.\n # Note that we need to be careful in how we rebind tensors since they may\n # be used by multiple nodes. 
Thus, it is not necessarily safe to assume that\n # `cast_node.inputs[0].outputs[0] == cast_node`.\n for index, inp in enumerate(node.inputs):\n if isinstance(inp, Constant):\n inp.values = inp.values.astype(\n onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[final_type]\n )\n\n for cast in inp_casts:\n if cast.outputs[0] == inp:\n node.inputs[index] = cast.inputs[0]\n\n for index, out in enumerate(node.outputs):\n for cast in out_casts:\n if cast.inputs[0] == out:\n out_tensor = cast.outputs[0]\n out_tensor.inputs.clear() # Disconnect from Cast\n node.outputs[index] = out_tensor\n\n if fold_shapes:\n # Perform shape tensor cast elision prior to most other folding\n G_LOGGER.debug(f\"Performing shape tensor cast elision in {graph.name}\")\n try:\n with graph.node_ids():\n for node in graph.nodes:\n run_cast_elision(node)\n except Exception as err:\n if not error_ok:\n raise err\n G_LOGGER.warning(\n \"'{:}' routine failed with: {:}\".format(\n \"Shape tensor cast elision\", err\n )\n )\n\n # Note that most of the remaining passes operate on a clone of the original graph.\n # Pass 3: Find all descendants of constant tensors\n\n graph_clone = graph.copy()\n clone_tensors = graph_clone.tensors()\n\n def update_foldable_outputs(graph_constants):\n def is_foldable(node):\n NO_FOLD_OPS = [\n \"QuantizeLinear\",\n \"DequantizeLinear\",\n \"DynamicQuantizeLinear\",\n ]\n if node.op in NO_FOLD_OPS:\n return False\n\n def all_tensors_const(tensors):\n return all(t.name in graph_constants for t in tensors)\n\n if not all_tensors_const(node.inputs):\n return False\n\n all_subgraph_foreign_tensors_const = True\n for attr in node.attrs.values():\n if isinstance(attr, Graph):\n foreign_tensors = attr._foreign_tensors().values()\n all_subgraph_foreign_tensors_const &= all_tensors_const(\n foreign_tensors\n )\n\n return all_subgraph_foreign_tensors_const and not should_exclude_node(node)\n\n # Walks along the outputs of graph_constants to see if they can also be computed statically.\n # Since the graph is topologically sorted, this should find all constant nodes in the graph.\n for node in graph_clone.nodes:\n if is_foldable(node):\n graph_constants.update({out.name: out for out in node.outputs})\n return graph_constants\n\n graph_constants = {\n name: tensor\n for name, tensor in clone_tensors.items()\n if isinstance(tensor, Constant)\n }\n graph_constants = update_foldable_outputs(graph_constants)\n\n # Pass 4: Shape Folding\n\n def get_producer(tensor, op):\n \"\"\"\n Get the producer of the specified tensor iff it matches op\n \"\"\"\n if len(tensor.inputs) != 1:\n return None\n\n node = tensor.inputs[0]\n if node.op != op:\n return None\n return node\n\n def get_input(node, index=0):\n \"\"\"\n Get the input tensor of a node iff the input tensor is not already marked a graph constant.\n \"\"\"\n if node is None:\n return None\n\n inp = node.inputs[index]\n\n # If the input was already found to be a constant, it will be folded anyway.\n if inp.name in graph_constants:\n return None\n\n return inp\n\n def get_scalar_value(tensor):\n \"\"\"\n Gets the scalar value of a constant tensor with a single item\n \"\"\"\n if not tensor.shape:\n return tensor.values\n else:\n return list(tensor.values)[0]\n\n def fold_shape(tensor):\n inp = get_input(get_producer(tensor, \"Shape\"))\n if inp is None:\n return None\n\n if inp.shape is None or misc.is_dynamic_shape(inp.shape):\n return None\n return np.array(inp.shape, dtype=np.int64)\n\n def fold_shape_gather(tensor):\n gather = get_producer(tensor, \"Gather\")\n if gather is 
None:\n return None\n\n data = gather.inputs[0]\n indices_tensor = gather.inputs[1]\n\n inp = get_input(get_producer(data, \"Shape\"))\n if inp is None or inp.shape is None:\n return None\n\n if not isinstance(indices_tensor, Constant):\n return None\n\n indices = indices_tensor.values\n if not indices.shape: # Scalar-case\n shape = inp.shape[int(indices)]\n if misc.is_dynamic_dimension(shape):\n return None\n else:\n shape = [inp.shape[index] for index in indices]\n if misc.is_dynamic_shape(shape):\n return None\n\n return np.array(shape, dtype=np.int64)\n\n def fold_shape_slice(tensor):\n slice = get_producer(tensor, \"Slice\")\n if slice is None:\n return None\n\n data = slice.inputs[0]\n\n if len(slice.inputs) >= 3:\n starts, ends = slice.inputs[1:3]\n if any(not isinstance(t, Constant) for t in [starts, ends]):\n return None\n starts, ends = get_scalar_value(starts), get_scalar_value(ends)\n elif \"starts\" in slice.attrs and \"ends\" in slice.attrs:\n starts, ends = slice.attrs[\"starts\"][0], slice.attrs[\"ends\"][0]\n else:\n return None\n\n inp = get_input(get_producer(data, \"Shape\"))\n if inp is None or inp.shape is None:\n return None\n\n # For shape tensors, we can only slice on the 0th dimension.\n if len(slice.inputs) > 3:\n axes = slice.inputs[3]\n if not isinstance(axes, Constant):\n return None\n\n if get_scalar_value(axes) != 0:\n return None\n elif \"axes\" in slice.attrs:\n if slice.attrs[\"axes\"][0] != 0:\n return None\n\n steps = 1\n if len(slice.inputs) > 4:\n steps = slice.inputs[4]\n if not isinstance(steps, Constant):\n return None\n steps = get_scalar_value(steps)\n elif \"steps\" in slice.attrs:\n steps = slice.attrs[\"steps\"][0]\n\n shape = inp.shape[starts:ends:steps]\n if misc.is_dynamic_shape(shape):\n return None\n\n return np.array(shape, dtype=np.int64)\n\n if fold_shapes:\n # NOTE: The order of shape folding passes is important to maximize how much we fold (phase-ordering problem).\n SHAPE_FOLD_FUNCS = [fold_shape_gather, fold_shape_slice, fold_shape]\n for shape_fold_func in SHAPE_FOLD_FUNCS:\n try:\n for tensor in clone_tensors.values():\n shape_of = shape_fold_func(tensor)\n\n if shape_of is not None:\n G_LOGGER.ultra_verbose(\n f\"Folding shape tensor: {tensor.name} to: {shape_of}\"\n )\n graph_constants[tensor.name] = tensor.to_constant(shape_of)\n graph_constants[tensor.name].inputs.clear()\n except Exception as err:\n if not error_ok:\n raise err\n G_LOGGER.warning(\n f\"'{shape_fold_func.__name__}' routine failed with:\\n{err}\"\n )\n else:\n graph_constants = update_foldable_outputs(graph_constants)\n\n # Pass 5: Evaluate all tensors descended from constants with ONNX-Runtime and replace them with constant values.\n\n def partition_and_infer(subgraph):\n def get_out_node_ids():\n # Gets the final output nodes - producer nodes of graph output tensors without other outputs.\n with subgraph.node_ids():\n out_node_ids = set()\n for out in subgraph.outputs:\n if not out.outputs and not isinstance(out, Constant):\n for n_inp in out.inputs:\n out_node_ids.add(subgraph._get_node_id(n_inp))\n return out_node_ids\n\n # Compute each output node in a separate subgraph.\n out_node_ids = get_out_node_ids()\n constant_values = {}\n\n for index in out_node_ids: # Have to use index since 'node' is not in part\n part = subgraph.copy()\n out_node = part.nodes[index]\n part.outputs = out_node.outputs\n part.name = f\"Folding: {[out.name for out in part.outputs]}\"\n part.cleanup(remove_unused_graph_inputs=True)\n names = [out.name for out in part.outputs]\n\n 
try:\n # Determining types is not trivial, and ONNX-RT does its own type inference.\n import onnxruntime as onnxrt\n\n sess = onnxrt.InferenceSession(\n export_onnx(part, do_type_check=False).SerializeToString(),\n providers=ORT_PROVIDERS,\n )\n values = sess.run(names, {})\n except Exception as err:\n G_LOGGER.warning(\n f\"Inference failed for subgraph: {part.name}. Note: Error was:\\n{err}\"\n )\n if partitioning == \"recursive\":\n G_LOGGER.verbose(\"Attempting to recursively partition subgraph\")\n # Partition failed, peel off last node.\n # We only need to remove one node, so avoid doing an expensive call to cleanup()\n part.outputs = out_node.inputs\n del part.nodes[part.nodes.index(out_node)]\n out_node.outputs.clear()\n out_node.inputs.clear()\n else:\n G_LOGGER.info(\n \"You may see better results if you set partitioning='recursive'\"\n )\n if not error_ok:\n raise err\n\n constant_values.update(partition_and_infer(part))\n else:\n constant_values.update(dict(zip(names, values)))\n\n return constant_values\n\n # Only evaluate foldable values that have non-foldable outputs or are graph outputs.\n # Otherwise, if all the outputs are foldable, then we can just evaluate the outputs directly.\n # Additionally, if we can determine tensor size, do not evaluate tensors whose sizes exceed the size threshold.\n def should_eval_foldable(tensor):\n from onnx_graphsurgeon.importers.onnx_importer import get_itemsize\n\n non_const = not isinstance(tensor, Constant)\n is_graph_output = not tensor.outputs\n has_non_foldable_outputs = any(\n out.name not in graph_constants for out in tensor.outputs\n )\n exceeds_size_threshold = (\n tensor.shape is not None\n and not misc.is_dynamic_shape(tensor.shape)\n and tensor.dtype is not None\n and size_threshold is not None\n ) and (misc.volume(tensor.shape) * get_itemsize(tensor.dtype) > size_threshold)\n\n return (\n non_const\n and (is_graph_output or has_non_foldable_outputs)\n and not exceeds_size_threshold\n )\n\n graph_clone.outputs = [\n t for t in graph_constants.values() if should_eval_foldable(t)\n ]\n G_LOGGER.debug(f\"Folding tensors: {graph_clone.outputs}\")\n graph_clone.cleanup(remove_unused_graph_inputs=True)\n\n # Using ._values avoids a deep copy of the values.\n constant_values = {\n name: tensor._values\n for name, tensor in graph_constants.items()\n if isinstance(tensor, Constant)\n }\n if graph_clone.outputs:\n if partitioning:\n constant_values.update(partition_and_infer(graph_clone))\n else:\n names = [t.name for t in graph_clone.outputs]\n try:\n import onnxruntime as onnxrt\n\n sess = onnxrt.InferenceSession(\n export_onnx(graph_clone, do_type_check=False).SerializeToString(),\n providers=ORT_PROVIDERS,\n )\n values = sess.run(names, {})\n constant_values.update(dict(zip(names, values)))\n except Exception as err:\n G_LOGGER.warning(\n \"Inference failed. You may want to try enabling partitioning to see better results. \"\n \"Note: Error was:\\n{:}\".format(err)\n )\n G_LOGGER.verbose(f\"Note: Graph was:\\n{graph_clone}\")\n if not error_ok:\n raise\n elif not constant_values:\n G_LOGGER.debug(\n \"Could not find any nodes in this graph ({:}) that can be folded. \"\n \"This could mean that constant folding has already been run on this graph. 
\"\n \"Skipping.\".format(graph.name)\n )\n\n # Finally, replace the Variables in the original graph with constants.\n large_tensors = {}\n if constant_values:\n graph_tensors = graph.tensors()\n for name, values in constant_values.items():\n tensor = graph_tensors[name]\n if isinstance(tensor, Constant):\n # No need to fold tensors that are already constant.\n continue\n\n if size_threshold is not None and values.nbytes > size_threshold:\n G_LOGGER.debug(\n \"Will not fold: '{:}' since its size in bytes ({:}) exceeds the size threshold ({:})\".format(\n name, values.nbytes, size_threshold\n )\n )\n continue\n elif size_threshold is None and values.nbytes > (1 << 20):\n large_tensors[name] = values.nbytes\n\n tensor.to_constant(values)\n tensor.inputs.clear() # Constants do not need inputs\n\n if large_tensors:\n large_tensors_mib = {\n tensor_name: f\"{value // (1 << 20)} MiB\"\n for tensor_name, value in large_tensors.items()\n }\n G_LOGGER.warning(\n \"It looks like this model contains foldable nodes that produce large outputs.\\n\"\n \"In order to avoid bloating the model, you may want to set a constant-folding size threshold.\\n\"\n \"Note: Large tensors and their corresponding sizes were: {:}\".format(\n large_tensors_mib\n ),\n mode=LogMode.ONCE,\n )\n\n # Folding subgraphs after the outer graph can lead to better folding.\n def fold_subgraphs():\n for node in graph.nodes:\n for attr in node.attrs.values():\n if isinstance(attr, Graph):\n attr.fold_constants(\n fold_shapes=fold_shapes,\n recurse_subgraphs=recurse_subgraphs,\n partitioning=partitioning,\n error_ok=error_ok,\n flatten_subgraphs=flatten_subgraphs,\n size_threshold=size_threshold,\n )\n\n if recurse_subgraphs:\n fold_subgraphs()\n\n if flatten_subgraphs:\n # Flatten conditional subgraphs\n index = 0\n while index < len(graph.nodes):\n node = graph.nodes[index]\n if node.op == \"If\" and isinstance(node.inputs[0], Constant):\n G_LOGGER.debug(f\"Flattening conditional: {node}\")\n cond = get_scalar_value(node.inputs[0])\n subgraph = (\n node.attrs[\"then_branch\"] if cond else node.attrs[\"else_branch\"]\n )\n # Need to add a suffix to subgraph tensors so they don't collide with outer graph tensors\n for tensor in subgraph._local_tensors().values():\n tensor.name += f\"_subg_{index}_{subgraph.name}\"\n\n # The subgraph outputs correspond to the If node outputs. 
Only the latter are visible\n # in the parent graph, so we rebind the producer nodes of the subgraph outputs to point\n # to the output tensors of the If instead.\n for node_out, subgraph_out in zip(node.outputs, subgraph.outputs):\n node_out.inputs.clear()\n for producer in subgraph_out.inputs:\n for tensor_idx, out_tensor in enumerate(producer.outputs):\n if out_tensor == subgraph_out:\n producer.outputs[tensor_idx] = node_out\n\n # Copy subgraph nodes into parent graph at the index of the If.\n del graph.nodes[index]\n graph.nodes[index:index] = subgraph.nodes\n index += len(subgraph.nodes) - 1\n\n index += 1\n\n return graph" }, { "identifier": "ONNXOp", "path": "owlite/backend/onnx/onnx_op.py", "snippet": "class ONNXOp:\n \"\"\"Class representing each ONNX op allowing convenient access to its schema properties\"\"\"\n\n schemas: dict[str, OpSchema] = get_core_operator_schemas()\n\n def __init__(self, name: str) -> None:\n self.name = name\n\n def __repr__(self) -> str:\n return f\"{self.name}\"\n\n def __str__(self) -> str:\n return self.__repr__()\n\n @property\n def is_valid(self) -> bool:\n \"\"\"Checks if the op exists in schemas\"\"\"\n return self.name in ONNXOp.schemas\n\n @property\n def schema(self) -> OpSchema:\n \"\"\"The full schema structure of the op\n\n Returns:\n list[tuple[int, list[tuple[str, OpSchema, list[OpSchema]]]]]: the full schema structure\n \"\"\"\n return ONNXOp.schemas[self.name]\n\n @property\n def type_constraints(self) -> dict[str, list[str]]:\n \"\"\"The dictionary that maps type parameter string to its allowed type strings\n\n Returns:\n dict[str, list[str]]: _description_\n \"\"\"\n return {\n type_constraint.type_param_str: type_constraint.allowed_type_strs\n for type_constraint in self.schema.type_constraints\n }\n\n def i(self, idx: int = 0) -> \"FormalONNXParameter\":\n \"\"\"The formal ONNX paramter of the input at given index.\n\n Args:\n idx (int, optional): the input index. Defaults to 0.\n\n Returns:\n FormalONNXParameter: the formal ONNX paramter of the input.\n \"\"\"\n return self._get_formal_parameter(self.schema.inputs, idx)\n\n def o(self, idx: int = 0) -> \"FormalONNXParameter\":\n \"\"\"The formal ONNX paramter of the output at given index.\n\n Args:\n idx (int, optional): the output index. Defaults to 0.\n\n Returns:\n FormalONNXParameter: the formal ONNX paramter of the output.\n \"\"\"\n return self._get_formal_parameter(self.schema.outputs, idx)\n\n def _get_formal_parameter(self, params: list, idx: int = 0) -> FormalONNXParameter:\n is_last_parameter_variadic = (\n params[-1].option == OpSchema.FormalParameterOption.Variadic\n )\n if not (-len(params) <= idx < len(params) or is_last_parameter_variadic):\n raise IndexError(f\"{self.name}: index out of range: {idx}\")\n if is_last_parameter_variadic:\n param_idx = min(idx, len(params) - 1)\n offset = idx - param_idx\n param = params[param_idx]\n param_name = f\"{param.name}_{offset}\"\n else:\n param = params[idx]\n param_name = param.name\n return FormalONNXParameter(\n name=param_name,\n is_optional=OpSchema.FormalParameterOption.Optional == param.option,\n is_variadic=OpSchema.FormalParameterOption.Variadic == param.option,\n is_homogeneous=param.is_homogeneous,\n is_differentiable=OpSchema.DifferentiationCategory.Differentiable\n == param.differentiation_category,\n type_constraints=convert_to_np_dtypes(\n self.type_constraints.get(param.type_str, param.type_str)\n ),\n )" } ]
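The fold_constants snippet above documents its keyword arguments (fold_shapes, partitioning, size_threshold, error_ok) and notes that the graph must be topologically sorted and cleaned up afterwards. A minimal usage sketch under those assumptions; the file names are placeholders and the import path follows the context entry owlite/backend/onnx/fold_constants.py:

import onnx
import onnx_graphsurgeon as gs

from owlite.backend.onnx.fold_constants import fold_constants

graph = gs.import_onnx(onnx.load("model.onnx"))   # "model.onnx" is a placeholder
graph.toposort()                                   # fold_constants expects a toposorted graph
graph = fold_constants(
    graph,
    fold_shapes=True,             # also fold statically known Shape nodes
    partitioning="recursive",     # isolate inference failures to sub-partitions
    size_threshold=1 << 20,       # skip folding tensors larger than 1 MiB
    error_ok=True,                # suppress inference errors instead of re-raising
)
graph.cleanup()                   # drop the now-unused producers of folded tensors
onnx.save(gs.export_onnx(graph), "model_folded.onnx")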
import os import sys import uuid import numpy as np import onnx import onnx_graphsurgeon as gs from collections import Counter, OrderedDict from collections.abc import Iterable from itertools import chain from typing import Callable, Optional, Union, cast from onnx import GraphProto, ModelProto, TensorProto from onnx.external_data_helper import ( _get_attribute_tensors_from_graph, _get_initializer_tensors_from_graph, _is_valid_filename, save_external_data, set_external_data, uses_external_data, ) from onnx_graphsurgeon.exporters.onnx_exporter import OnnxExporter from onnx_graphsurgeon.importers.onnx_importer import get_numpy_type from ...logger import log from ..utils import is_floating_point, nodestr from .fold_constants import fold_constants from .onnx_op import ONNXOp
9,943
conv_weight.values *= value_to_fold.reshape(target_shape) conv_node.outputs = mul_or_div_node.outputs mul_or_div_node.outputs.clear() def _fold_add_or_sub(conv_node: gs.Node, add_or_sub_node: gs.Node) -> None: assert conv_node.op == "Conv" and add_or_sub_node.op in ("Add", "Sub") conv_params = _get_constant_conv_params(conv_node) assert conv_params is not None conv_weight, conv_bias, _ = conv_params param_to_fold = _get_constant_param_to_fold(add_or_sub_node) assert param_to_fold is not None value_to_fold: np.ndarray = param_to_fold.values value_to_fold = np.broadcast_to( value_to_fold.squeeze(), conv_weight.values.shape[0] ) if add_or_sub_node.op == "Sub": if ( add_or_sub_node.inputs[1] is conv_node.outputs[0] ): # Sub(param, Conv(x, w, b)) conv_weight.values = -conv_weight.values if conv_bias is not None: conv_bias.values = -conv_bias.values else: # Sub(Conv(x, w, b), param) value_to_fold = -value_to_fold if conv_bias is not None: conv_bias.values += value_to_fold else: new_bias = gs.Constant( param_to_fold.name, value_to_fold, param_to_fold.data_location ) conv_node.inputs.append(new_bias) conv_node.outputs = add_or_sub_node.outputs add_or_sub_node.outputs.clear() if node_to_fold.op in ("Mul", "Div"): _fold_mul_or_div(conv_node, node_to_fold) elif node_to_fold.op in ("Add", "Sub"): _fold_add_or_sub(conv_node, node_to_fold) else: # for future extensibility, we might be able to fold more operations raise NotImplementedError() for node in graph.nodes: if node.op == "Conv": i = 0 while i < MAXIMUM_ITERATION: if _is_foldable(node.outputs[0]): log.debug( f"Folding {nodestr(node.outputs[0].outputs[0])} into {nodestr(node)}" ) _fold(node, node.outputs[0].outputs[0]) i += 1 continue break return graph # pylint: disable=missing-function-docstring def remove_if_dropout_op_with_ratio_zero(node: gs.Node, graph: gs.Graph) -> None: if node.op != "Dropout": return ratio_input_node = input_node_of(node, 1, 0) if ratio_input_node is None or "value" not in ratio_input_node.attrs: return if ratio_input_node.attrs["value"].values.item() != 0.0: return remove_if_has_unique_non_optional_input_and_unique_used_output(node, graph) def remove_if_cast_op_with_no_effect(node: gs.Node, graph: gs.Graph) -> None: if node.op != "Cast": return if "to" not in node.attrs: log.debug_warning(f'Missing required attribute "to" in {nodestr(node)}') return if len(node.inputs) != 1: log.debug_warning( f"{nodestr(node)} must have 1 input but {len(node.inputs)} found: {node.inputs}" ) return if len(node.outputs) != 1: log.debug_warning( f"{nodestr(node)} must have 1 output but {len(node.outputs)} found: {node.outputs}" ) return the_input = node.inputs[0] the_output = node.outputs[0] data_type = cast(TensorProto.DataType, node.attrs["to"]) dtype = get_numpy_type(data_type) if not isinstance(dtype, np.dtype): log.debug_warning( f'Failed to convert attribute "to": {TensorProto.DataType.Name(data_type)}' " of {nodestr(node)} into numpy type" ) return if the_input.dtype in (dtype, the_output.dtype): remove_if_has_unique_non_optional_input_and_unique_used_output(node, graph) def cast_constant_input_fp_tensors_of(node: gs.Node, dtype: np.dtype) -> None:
"""Preprocessing steps required for onnx simplifier and for further optimization""" OnnxTransform = Callable[[gs.Graph], gs.Graph] ONNX_TRANSFORMS: dict[str, OnnxTransform] = {} TensorType = Union[gs.Constant, gs.Variable] MAXIMUM_ITERATION = 100 def apply_onnx_transforms( onnx_proto: ModelProto, output_path: Optional[str] = None, **kwargs ) -> ModelProto: """Applies all transformations registered in this file. Args: onnx_proto (ModelProto): the ONNX model proto to apply transformations. output_path (Optional[str], optional): the output path in string. If provided, runs the ModelProto will be written with external data after the transformations (required for large models > 2GB). Defaults to None. Returns: ModelProto: _description_ """ graph = gs.import_onnx(onnx_proto) for name, transform in ONNX_TRANSFORMS.items(): log.debug(f"Applying ONNX transform: {name}") graph = transform(graph) graph.toposort() graph = fold_constants(graph) graph.cleanup() if output_path is None: return gs.export_onnx(graph) export_to_onnx_with_external_data(graph, output_path, **kwargs) return onnx.load(output_path) def register_onnx_transform(transform: OnnxTransform) -> OnnxTransform: """Registers a ONNX transform globally. Note that the registration order matters. Use this function as a decorator to register your custom ONNX transform. For example: @register_onnx_transform def do_something_on_onnx_graph(graph: gs.Graph) -> gs.Graph: ... """ name = transform.__name__ if name in ONNX_TRANSFORMS: log.debug_warning(f"Overwriting existing ONNX transform: {name}") ONNX_TRANSFORMS[name] = transform return transform @register_onnx_transform def fold_trilu_constants(graph: gs.Graph) -> gs.Graph: """Folds Trilu ops if constant-foldable. Note that this transformation is a workaround for the missing support for the Trilu op in onnx-runtime Args: graph (gs.Graph): a ONNX graph. Returns: gs.Graph: the ONNX graph with constant-foldable Trilu ops removed. """ for node in graph.nodes: if node.op == "Trilu": input_node = input_node_of(node, 0) if input_node is None or "value" not in input_node.attrs: continue input_values: np.ndarray = input_node.attrs["value"].values k_node = input_node_of(node, 1) if k_node is None or "value" not in k_node.attrs: continue k_values: np.ndarray = k_node.attrs["value"].values folded_values: np.ndarray = np.tril(input_values, k_values) output_tensor: gs.Variable = node.outputs[0] output_tensor.inputs.clear() output_const = gs.Constant(name=f"{node.name}_const", values=folded_values) const_node = gs.Node( op="Constant", name=f"{node.name}_folded", attrs=OrderedDict([("value", output_const)]), outputs=[output_tensor], ) graph.nodes.remove(node) graph.nodes.append(const_node) log.debug(f"Replaced {nodestr(node)} by {nodestr(const_node)}") graph.cleanup() graph.toposort() return graph @register_onnx_transform def eliminate_nop_dropouts(graph: gs.Graph) -> gs.Graph: """Eliminates all Dropout ops with no effect. Args: graph (gs.Graph): a ONNX graph. Returns: gs.Graph: the ONNX graph with meaningless Dropout ops removed. """ for node in graph.nodes: remove_if_dropout_op_with_ratio_zero(node, graph) graph.cleanup() graph.toposort() return graph @register_onnx_transform def eliminate_nop_cast(graph: gs.Graph) -> gs.Graph: """Eliminates all Cast ops with no effect. Args: graph (gs.Graph): a ONNX graph. Returns: gs.Graph: the ONNX graph with meaningless Cast ops removed. 
""" for node in graph.nodes: remove_if_cast_op_with_no_effect(node, graph) graph.cleanup() graph.toposort() return graph @register_onnx_transform def synchronize_floating_point_types(graph: gs.Graph) -> gs.Graph: """Synchronizes all floating points types used in the graph as the most common one. Args: graph (gs.Graph): a ONNX graph. Returns: gs.Graph: the ONNX graph with only one floating point type. """ floating_point_dtypes: list[np.dtype] = [ *filter(is_floating_point, (t.dtype for t in graph.tensors().values())) ] counter = Counter(floating_point_dtypes) log.debug(f"Counts of floating point types: {counter}") if len(counter) == 0: log.debug("No tensor with floating point type found in the graph") return graph most_common_dtype = counter.most_common(1)[0][0] log.debug(f"Most common floating point type: {most_common_dtype}") if len(counter) > 1: log.warning( "More than one floating point types are used in the graph (" f'{", ".join(f"{value} tensors of type {key.name}" for key, value in OrderedDict(counter).items())}). ' f"Will use the most common one: {most_common_dtype}" ) for node in graph.nodes: cast_constant_input_fp_tensors_of(node, most_common_dtype) cast_output_fp_tensors_of(node, most_common_dtype) return eliminate_nop_cast(graph) @register_onnx_transform def fold_nodes_after_conv(graph: gs.Graph) -> gs.Graph: """Fold foldable element-wise operations after convolution into convolution's weight and bias. Args: graph (gs.Graph): a ONNX graph. Returns: gs.Graph: the transformed ONNX graph """ # pylint: disable=too-many-statements def _get_constant_or_none(tensor: TensorType) -> Optional[gs.Constant]: if isinstance(tensor, gs.Constant): return tensor if ( len(tensor.inputs) == 1 and tensor.inputs[0].op == "Constant" and isinstance(tensor.inputs[0].attrs.get("value"), gs.Constant) ): return tensor.inputs[0].attrs.get("value") return None def _get_constant_conv_params( conv_node: gs.Node, ) -> Optional[tuple[gs.Constant, Optional[gs.Constant], Optional[gs.Constant]]]: if conv_node.op != "Conv": raise ValueError( f"Expected a `Conv` operation but received `{conv_node.op}`" ) # weight is required field for conv conv_weight = conv_node.inputs[1] # bias is optional input for conv conv_bias = conv_node.inputs[2] if len(conv_node.inputs) == 3 else None # we do not consider zero point for now quantizer_step_size = None if ( isinstance(conv_weight, gs.Variable) and get_defining_op_type(conv_weight) == "DequantizeLinear" ): dequantize_node = conv_weight.inputs[0] if get_defining_op_type(dequantize_node.inputs[0]) != "QuantizeLinear": # parent node of DequantizeLinear is not QuantizeLinear return None quantizer_step_size = dequantize_node.inputs[1] if len(quantizer_step_size.outputs) != 2: # quantizer step size used somewhere else than current quantizers, # note that QuantizeLinear and DequantizeLinear from same quantizer shares the same tensor return None quantize_node = dequantize_node.inputs[0].inputs[0] if quantize_node.inputs[1] is not quantizer_step_size: # QuantizeLinear does not share the same tensor as step size with DequantizeLinear return None quantizer_step_size = _get_constant_or_none(quantizer_step_size) if quantizer_step_size is None: # quantizer step size is variable return None conv_weight = quantize_node.inputs[0] conv_weight = _get_constant_or_none(conv_weight) if conv_weight is None: return None if conv_bias is not None: conv_bias = _get_constant_or_none(conv_bias) if conv_bias is None: return None assert ( isinstance(conv_weight, gs.Constant) and isinstance(conv_bias, 
(gs.Constant, type(None))) and isinstance(quantizer_step_size, (gs.Constant, type(None))) ) return conv_weight, conv_bias, quantizer_step_size def _get_constant_param_to_fold(node_to_fold: gs.Node) -> Optional[gs.Constant]: parameter_to_fold = [ _get_constant_or_none(tensor) for tensor in node_to_fold.inputs ] parameter_to_fold = [ tensor for tensor in parameter_to_fold if tensor is not None ] if len(parameter_to_fold) == 1: return parameter_to_fold[0] return None def _is_foldable(conv_output_tensor: gs.Variable) -> bool: # convolution output is dependant to other node than conv or convolution output is used more than once if len(conv_output_tensor.inputs) != 1 or len(conv_output_tensor.outputs) != 1: return False conv_node: gs.Node = conv_output_tensor.inputs[0] node_to_fold: gs.Node = conv_output_tensor.outputs[0] # only the element-wise operations are foldable, and we cannot fold Div(param, Conv(x, w, b)) if node_to_fold.op not in ("Add", "Sub", "Mul", "Div") or ( node_to_fold.op == "Div" and node_to_fold.inputs[1] is conv_output_tensor ): return False # all involved tensors should be constant parameter_to_fold = _get_constant_param_to_fold(node_to_fold) conv_node_params = _get_constant_conv_params(conv_node) if parameter_to_fold is None or conv_node_params is None: return False conv_weight, conv_bias, quantizer_step_size = conv_node_params # disclaimer: we now only consider 2d convolution, with this removed, conditions below should be revisited if conv_weight.values.ndim != 4 or ( conv_bias is not None and conv_bias.values.ndim != 1 ): return False # cannot broadcast parameter to fold to convolution output channel dimension if ( parameter_to_fold.values.shape.count(1) < len(parameter_to_fold.values.shape) - 1 and parameter_to_fold.values.size != conv_weight.values.shape[0] ): return False # cannot broadcast parameter to fold to per-tensor quantization step size if ( quantizer_step_size is not None and parameter_to_fold.values.size != 1 and quantizer_step_size.values.size == 1 ): return False return True def _fold(conv_node: gs.Node, node_to_fold: gs.Node) -> None: def _fold_mul_or_div(conv_node: gs.Node, mul_or_div_node: gs.Node) -> None: assert conv_node.op == "Conv" and mul_or_div_node.op in ("Mul", "Div") conv_params = _get_constant_conv_params(conv_node) assert conv_params is not None conv_weight, conv_bias, quantizer_step_size = conv_params param_to_fold = _get_constant_param_to_fold(mul_or_div_node) assert param_to_fold is not None value_to_fold: np.ndarray = ( param_to_fold.values if mul_or_div_node.op == "Mul" else (param_to_fold.values**-1) ) value_to_fold = np.broadcast_to( value_to_fold.squeeze(), conv_weight.values.shape[0] ) if conv_bias is not None: conv_bias.values *= value_to_fold if quantizer_step_size is not None: if quantizer_step_size.values.size == 1: assert np.all(value_to_fold == value_to_fold[0]) quantizer_step_size.values *= np.abs(value_to_fold)[0] else: quantizer_step_size.values *= np.abs(value_to_fold) target_shape = ( value_to_fold.shape[0], 1, 1, 1, ) conv_weight.values *= value_to_fold.reshape(target_shape) conv_node.outputs = mul_or_div_node.outputs mul_or_div_node.outputs.clear() def _fold_add_or_sub(conv_node: gs.Node, add_or_sub_node: gs.Node) -> None: assert conv_node.op == "Conv" and add_or_sub_node.op in ("Add", "Sub") conv_params = _get_constant_conv_params(conv_node) assert conv_params is not None conv_weight, conv_bias, _ = conv_params param_to_fold = _get_constant_param_to_fold(add_or_sub_node) assert param_to_fold is not None value_to_fold: 
np.ndarray = param_to_fold.values value_to_fold = np.broadcast_to( value_to_fold.squeeze(), conv_weight.values.shape[0] ) if add_or_sub_node.op == "Sub": if ( add_or_sub_node.inputs[1] is conv_node.outputs[0] ): # Sub(param, Conv(x, w, b)) conv_weight.values = -conv_weight.values if conv_bias is not None: conv_bias.values = -conv_bias.values else: # Sub(Conv(x, w, b), param) value_to_fold = -value_to_fold if conv_bias is not None: conv_bias.values += value_to_fold else: new_bias = gs.Constant( param_to_fold.name, value_to_fold, param_to_fold.data_location ) conv_node.inputs.append(new_bias) conv_node.outputs = add_or_sub_node.outputs add_or_sub_node.outputs.clear() if node_to_fold.op in ("Mul", "Div"): _fold_mul_or_div(conv_node, node_to_fold) elif node_to_fold.op in ("Add", "Sub"): _fold_add_or_sub(conv_node, node_to_fold) else: # for future extensibility, we might be able to fold more operations raise NotImplementedError() for node in graph.nodes: if node.op == "Conv": i = 0 while i < MAXIMUM_ITERATION: if _is_foldable(node.outputs[0]): log.debug( f"Folding {nodestr(node.outputs[0].outputs[0])} into {nodestr(node)}" ) _fold(node, node.outputs[0].outputs[0]) i += 1 continue break return graph # pylint: disable=missing-function-docstring def remove_if_dropout_op_with_ratio_zero(node: gs.Node, graph: gs.Graph) -> None: if node.op != "Dropout": return ratio_input_node = input_node_of(node, 1, 0) if ratio_input_node is None or "value" not in ratio_input_node.attrs: return if ratio_input_node.attrs["value"].values.item() != 0.0: return remove_if_has_unique_non_optional_input_and_unique_used_output(node, graph) def remove_if_cast_op_with_no_effect(node: gs.Node, graph: gs.Graph) -> None: if node.op != "Cast": return if "to" not in node.attrs: log.debug_warning(f'Missing required attribute "to" in {nodestr(node)}') return if len(node.inputs) != 1: log.debug_warning( f"{nodestr(node)} must have 1 input but {len(node.inputs)} found: {node.inputs}" ) return if len(node.outputs) != 1: log.debug_warning( f"{nodestr(node)} must have 1 output but {len(node.outputs)} found: {node.outputs}" ) return the_input = node.inputs[0] the_output = node.outputs[0] data_type = cast(TensorProto.DataType, node.attrs["to"]) dtype = get_numpy_type(data_type) if not isinstance(dtype, np.dtype): log.debug_warning( f'Failed to convert attribute "to": {TensorProto.DataType.Name(data_type)}' " of {nodestr(node)} into numpy type" ) return if the_input.dtype in (dtype, the_output.dtype): remove_if_has_unique_non_optional_input_and_unique_used_output(node, graph) def cast_constant_input_fp_tensors_of(node: gs.Node, dtype: np.dtype) -> None:
onnx_op = ONNXOp(node.op)
4
2023-12-08 06:41:50+00:00
12k
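The fold_nodes_after_conv transform above works because convolution is linear in its weight and bias, so a constant element-wise Mul/Div/Add/Sub sitting behind a Conv can be absorbed into the Conv's own parameters (and, when the weight is quantized, the per-channel quantizer step size is rescaled by the absolute value of the folded factor). A minimal NumPy sketch of the Mul case, using a hypothetical toy convolution and no ONNX or onnx-graphsurgeon at all:

import numpy as np

# Identity exploited by the transform: Mul(Conv(x, W, b), s) == Conv(x, W*s, b*s)
# for a per-output-channel constant s.
rng = np.random.default_rng(0)
x = rng.normal(size=(1, 3, 8, 8))      # NCHW input
W = rng.normal(size=(4, 3, 3, 3))      # (out_ch, in_ch, kH, kW)
b = rng.normal(size=(4,))
s = rng.normal(size=(4,))              # per-output-channel scale to fold

def conv2d(x, W, b):
    n, c, h, w = x.shape
    o, _, kh, kw = W.shape
    out = np.zeros((n, o, h - kh + 1, w - kw + 1))
    for i in range(out.shape[2]):
        for j in range(out.shape[3]):
            patch = x[:, :, i:i + kh, j:j + kw]                  # (n, c, kh, kw)
            out[:, :, i, j] = np.einsum("nchw,ochw->no", patch, W) + b
    return out

y_then_mul = conv2d(x, W, b) * s.reshape(1, -1, 1, 1)            # Conv followed by Mul
y_folded = conv2d(x, W * s.reshape(-1, 1, 1, 1), b * s)          # Mul folded into Conv
print(np.allclose(y_then_mul, y_folded))                         # True -> Mul node removable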
bolna-ai/bolna
bolna/agent_manager/assistant_manager.py
[ { "identifier": "BaseManager", "path": "bolna/agent_manager/base_manager.py", "snippet": "class BaseManager:\n def __init__(self):\n self.agent = \"bolna-agent\"" }, { "identifier": "TaskManager", "path": "bolna/agent_manager/task_manager.py", "snippet": "class TaskManager(BaseManager):\n def __init__(self, assistant_name, task_id, task, ws, input_parameters=None, context_data=None, user_id=None,\n assistant_id=None, run_id=None, connected_through_dashboard=False, cache = None):\n super().__init__()\n logger.info(f\"doing task {task}\")\n self.task_id = task_id\n self.assistant_name = assistant_name\n self.tools = {}\n self.websocket = ws\n self.task_config = task\n self.context_data = context_data\n self.connected_through_dashboard = connected_through_dashboard\n\n # Set up communication queues between processes\n self.audio_queue = asyncio.Queue()\n self.llm_queue = asyncio.Queue()\n self.synthesizer_queue = asyncio.Queue()\n\n self.pipelines = task['toolchain']['pipelines']\n self.textual_chat_agent = False\n if task['toolchain']['pipelines'][0] == \"llm\" and task[\"tools_config\"][\"llm_agent\"][\n \"agent_task\"] == \"conversation\":\n self.textual_chat_agent = False\n\n self.start_time = time.time()\n\n # Assistant persistance stuff\n self.user_id = user_id\n self.assistant_id = assistant_id\n self.run_id = run_id\n self.mark_set = set()\n\n self.conversation_ended = False\n\n # Prompts\n self.prompts, self.system_prompt = {}, {}\n\n self.input_parameters = input_parameters\n\n self.queues = {\n \"transcriber\": self.audio_queue,\n \"llm\": self.llm_queue,\n \"synthesizer\": self.synthesizer_queue\n }\n\n if task_id == 0:\n if self.task_config[\"tools_config\"][\"input\"][\"provider\"] in SUPPORTED_INPUT_HANDLERS.keys():\n logger.info(f\"Connected through dashboard {connected_through_dashboard}\")\n if connected_through_dashboard:\n # If connected through dashboard get basic dashboard class\n input_handler_class = SUPPORTED_INPUT_HANDLERS.get(\"default\")\n else:\n input_handler_class = SUPPORTED_INPUT_HANDLERS.get(\n self.task_config[\"tools_config\"][\"input\"][\"provider\"])\n self.tools[\"input\"] = input_handler_class(self.queues, self.websocket, get_required_input_types(task),\n self.mark_set, self.connected_through_dashboard)\n else:\n raise \"Other input handlers not supported yet\"\n\n if self.task_config[\"tools_config\"][\"output\"] is None:\n logger.info(\"Not setting up any output handler as it is none\")\n elif self.task_config[\"tools_config\"][\"output\"][\"provider\"] in SUPPORTED_OUTPUT_HANDLERS.keys():\n output_handler_class = SUPPORTED_OUTPUT_HANDLERS.get(self.task_config[\"tools_config\"][\"output\"][\"provider\"])\n if self.task_config[\"tools_config\"][\"output\"][\"provider\"] == \"twilio\":\n logger.info(f\"Making sure that the sampling rate for output handler is 8000\")\n self.task_config['tools_config']['synthesizer']['provider_config']['sampling_rate'] = 8000\n self.task_config['tools_config']['synthesizer']['audio_format'] = 'pcm'\n self.tools[\"output\"] = output_handler_class(self.websocket, self.mark_set)\n else:\n raise \"Other input handlers not supported yet\"\n\n # Current conversation state\n self.current_request_id = None\n self.previous_request_id = None\n self.llm_rejected_request_ids = set()\n self.llm_processed_request_ids = set()\n\n # Agent stuff\n self.history = []\n self.label_flow = []\n\n # Setup IO SERVICE, TRANSCRIBER, LLM, SYNTHESIZER\n self.llm_task = None\n self.synthesizer_tasks = []\n\n # state of conversation\n 
self.was_long_pause = False\n\n # Call conversations\n self.call_sid = None\n self.stream_sid = None\n\n # metering\n self.transcriber_duration = 0\n self.synthesizer_characters = 0\n self.ended_by_assistant = False\n\n self.extracted_data = None\n self.summarized_data = None\n\n #self.stream = not connected_through_dashboard and \"synthesizer\" in self.task_config[\"tools_config\"] and self.task_config[\"tools_config\"][\"synthesizer\"][\"stream\"]\n self.stream = not connected_through_dashboard #Currently we are allowing only realtime conversation based usecases. Hence it'll always be true unless connected through dashboard\n self.is_local = False\n\n # Memory\n self.cache = cache\n logger.info(\"task initialization completed\")\n\n # Sequence id for interruption\n self.curr_sequence_id = 0\n self.sequence_ids = set()\n\n async def load_prompt(self, assistant_name, task_id, is_local):\n logger.info(\"prompt and config setup started\")\n self.is_local = is_local\n prompt_responses = await get_prompt_responses(assistant_name, assistant_id=self.assistant_id,\n user_id=self.user_id, local=self.is_local)\n self.prompts = prompt_responses[\"task_{}\".format(task_id + 1)]\n\n if \"system_prompt\" in self.prompts:\n # This isn't a graph based agent\n enriched_prompt = self.prompts[\"system_prompt\"]\n if self.context_data is not None:\n enriched_prompt = update_prompt_with_context(self.prompts[\"system_prompt\"], self.context_data)\n self.system_prompt = {\n 'role': \"system\",\n 'content': enriched_prompt\n }\n else:\n self.system_prompt = {\n 'role': \"system\",\n 'content': \"\"\n }\n\n self.history = [self.system_prompt]\n\n llm_config = {\n \"streaming_model\": self.task_config[\"tools_config\"][\"llm_agent\"][\"streaming_model\"],\n \"classification_model\": self.task_config[\"tools_config\"][\"llm_agent\"][\"classification_model\"]\n }\n\n # setting transcriber\n if self.task_config[\"tools_config\"][\"transcriber\"] is not None:\n provider = \"playground\" if self.connected_through_dashboard else self.task_config[\"tools_config\"][\"input\"][\n \"provider\"]\n self.task_config[\"tools_config\"][\"transcriber\"][\"input_queue\"] = self.audio_queue\n if self.task_config[\"tools_config\"][\"transcriber\"][\"model\"] in SUPPORTED_TRANSCRIBER_MODELS.keys():\n if self.connected_through_dashboard:\n self.task_config[\"tools_config\"][\"transcriber\"][\"stream\"] = False\n transcriber_class = SUPPORTED_TRANSCRIBER_MODELS.get(\n self.task_config[\"tools_config\"][\"transcriber\"][\"model\"])\n self.tools[\"transcriber\"] = transcriber_class(provider, **self.task_config[\"tools_config\"][\"transcriber\"])\n # setting synthesizer\n logger.info(f\"Synthesizer config: {self.task_config['tools_config']['synthesizer']}\")\n if self.task_config[\"tools_config\"][\"synthesizer\"] is not None:\n self.synthesizer_provider = self.task_config[\"tools_config\"][\"synthesizer\"].pop(\"provider\")\n synthesizer_class = SUPPORTED_SYNTHESIZER_MODELS.get(self.synthesizer_provider)\n provider_config = self.task_config[\"tools_config\"][\"synthesizer\"].pop(\"provider_config\")\n if self.connected_through_dashboard:\n self.task_config[\"tools_config\"][\"synthesizer\"][\"audio_format\"] = \"mp3\" # Hard code mp3 if we're connected through dashboard\n self.task_config[\"tools_config\"][\"synthesizer\"][\"stream\"] = False #Hardcode stream to be False as we don't want to get blocked by a __listen_synthesizer co-routine\n self.tools[\"synthesizer\"] = 
synthesizer_class(**self.task_config[\"tools_config\"][\"synthesizer\"], **provider_config)\n llm_config[\"max_tokens\"] = self.task_config[\"tools_config\"][\"synthesizer\"].get('max_tokens')\n llm_config[\"buffer_size\"] = self.task_config[\"tools_config\"][\"synthesizer\"].get('buffer_size')\n\n # setting llm\n if self.task_config[\"tools_config\"][\"llm_agent\"][\"family\"] in SUPPORTED_LLM_MODELS.keys():\n llm_class = SUPPORTED_LLM_MODELS.get(self.task_config[\"tools_config\"][\"llm_agent\"][\"family\"])\n llm = llm_class(**llm_config)\n else:\n raise Exception(f'LLM {self.task_config[\"tools_config\"][\"llm_agent\"][\"family\"]} not supported')\n\n if self.task_config[\"task_type\"] == \"conversation\":\n if self.task_config[\"tools_config\"][\"llm_agent\"][\"agent_flow_type\"] == \"streaming\":\n self.tools[\"llm_agent\"] = StreamingContextualAgent(llm)\n elif self.task_config[\"tools_config\"][\"llm_agent\"][\"agent_flow_type\"] in (\"preprocessed\", \"formulaic\"):\n preprocessed = self.task_config[\"tools_config\"][\"llm_agent\"][\"agent_flow_type\"] == \"preprocessed\"\n logger.info(f\"LLM TYPE {type(llm)}\")\n self.tools[\"llm_agent\"] = GraphBasedConversationAgent(llm, context_data=self.context_data,\n prompts=self.prompts, preprocessed=preprocessed)\n elif self.task_config[\"task_type\"] == \"extraction\":\n logger.info(\"Setting up extraction agent\")\n self.tools[\"llm_agent\"] = ExtractionContextualAgent(llm, prompt=self.system_prompt)\n self.extracted_data = None\n elif self.task_config[\"task_type\"] == \"summarization\":\n logger.info(\"Setting up summarization agent\")\n self.tools[\"llm_agent\"] = SummarizationContextualAgent(llm, prompt=self.system_prompt)\n self.summarized_data = None\n\n logger.info(\"prompt and config setup completed\")\n ########################\n # LLM task\n ########################\n\n async def _handle_llm_output(self, next_step, text_chunk, should_bypass_synth, meta_info):\n logger.info(\"received text from LLM for output processing: {}\".format(text_chunk))\n if next_step == \"synthesizer\" and not should_bypass_synth:\n task = asyncio.gather(self._synthesize(create_ws_data_packet(text_chunk, meta_info)))\n self.synthesizer_tasks.append(asyncio.ensure_future(task))\n elif self.tools[\"output\"] is not None:\n await self.tools[\"output\"].handle(create_ws_data_packet(text_chunk, meta_info))\n\n def _get_next_step(self, sequence, origin):\n try:\n return next((self.pipelines[sequence][i + 1] for i in range(len(self.pipelines[sequence]) - 1) if\n self.pipelines[sequence][i] == origin), \"output\")\n except Exception as e:\n logger.error(f\"Error getting next step: {e}\")\n\n def _set_call_details(self, message):\n if self.call_sid is not None and self.stream_sid is not None and \"call_sid\" not in message['meta_info'] and \"stream_sid\" not in message['meta_info']:\n return\n\n if \"call_sid\" in message['meta_info']:\n self.call_sid = message['meta_info'][\"call_sid\"]\n if \"stream_sid\" in message:\n self.stream_sid = message['meta_info'][\"stream_sid\"]\n\n async def _process_followup_task(self, message, sequence, meta_info):\n message = format_messages(self.input_parameters[\"messages\"]) # Remove the initial system prompt\n self.history.append({\n 'role': 'user',\n 'content': message\n })\n\n json_data = await self.tools[\"llm_agent\"].generate(self.history)\n if \"summary\" in json_data:\n logger.info(f'Summary {json_data[\"summary\"]}')\n self.summarized_data = json_data[\"summary\"]\n else:\n json_data = clean_json_string(json_data)\n 
logger.info(f\"After replacing {json_data}\")\n json_data = json.loads(json_data)\n self.extracted_data = json_data\n logger.info(\"Done\")\n\n async def _process_conversation_preprocessed_task(self, message, sequence, meta_info):\n if self.task_config[\"tools_config\"][\"llm_agent\"]['agent_flow_type'] == \"preprocessed\":\n llm_response = \"\"\n self.history.append({\n 'role': 'user',\n 'content': message['data']\n })\n start_time = time.time()\n\n async for text_chunk in self.tools['llm_agent'].generate(self.history, stream=True, synthesize=True,\n label_flow=self.label_flow):\n if text_chunk == \"<end_of_conversation>\":\n logger.info(\"Got end of conversation. I'm stopping now\")\n self.conversation_ended = True\n await asyncio.sleep(5) #Make sure that the message is passed over and complete before cutting the handler\n await self.tools[\"input\"].stop_handler()\n logger.info(\"Stopped input handler\")\n if \"transcriber\" in self.tools and not self.connected_through_dashboard:\n logger.info(\"Stopping transcriber\")\n await self.tools[\"transcriber\"].toggle_connection()\n await asyncio.sleep(5) # Making sure whatever message was passed is over\n return\n logger.info(f\"Text chunk {text_chunk}\")\n if is_valid_md5(text_chunk):\n self.synthesizer_tasks.append(asyncio.create_task(\n self._synthesize(create_ws_data_packet(text_chunk, meta_info, is_md5_hash=True))))\n else:\n self.synthesizer_tasks.append(asyncio.create_task(\n self._synthesize(create_ws_data_packet(text_chunk, meta_info, is_md5_hash=False))))\n\n async def _process_conversation_formulaic_task(self, message, sequence, meta_info):\n self.history.append({\n 'role': 'user',\n 'content': message['data']\n })\n start_time = time.time()\n llm_response = \"\"\n logger.info(\"Agent flow is formulaic and hence moving smoothly\")\n async for text_chunk in self.tools['llm_agent'].generate(self.history, stream=True, synthesize=True):\n if is_valid_md5(text_chunk):\n self.synthesizer_tasks.append(asyncio.create_task(\n self._synthesize(create_ws_data_packet(text_chunk, meta_info, is_md5_hash=True))))\n else:\n # TODO Make it more modular\n llm_response += \" \" +text_chunk\n next_step = self._get_next_step(sequence, \"llm\")\n if next_step == \"synthesizer\":\n task = asyncio.gather(self._synthesize(create_ws_data_packet(text_chunk, meta_info)))\n self.synthesizer_tasks.append(asyncio.ensure_future(task))\n else:\n logger.info(f\"Sending output text {sequence}\")\n await self.tools[\"output\"].handle(create_ws_data_packet(text_chunk, meta_info))\n self.synthesizer_tasks.append(asyncio.create_task(\n self._synthesize(create_ws_data_packet(text_chunk, meta_info, is_md5_hash=False))))\n\n async def _process_conversation_task(self, message, sequence, meta_info):\n next_step = None\n logger.info(\"agent flow is not preprocessed\")\n llm_response = \"\"\n self.history.append({\n 'role': 'user',\n 'content': message['data']\n })\n\n \n start_time = time.time()\n should_bypass_synth = 'bypass_synth' in meta_info and meta_info['bypass_synth'] == True\n next_step = self._get_next_step(sequence, \"llm\")\n curr_sequence_id = self.curr_sequence_id + 1\n meta_info[\"sequence_id\"] = curr_sequence_id\n cache_response = self.cache.get(get_md5_hash(message['data'])) if self.cache is not None else None\n \n if cache_response is not None:\n logger.info(\"It was a cache hit and hence simply returning\")\n await self._handle_llm_output(next_step, cache_response, should_bypass_synth, meta_info)\n else:\n async for llm_message in 
self.tools['llm_agent'].generate(self.history, synthesize=True):\n text_chunk, end_of_llm_stream = llm_message\n logger.info(f\"###### time to get the first chunk {time.time() - start_time} {text_chunk}\")\n llm_response += \" \" + text_chunk\n if end_of_llm_stream:\n meta_info[\"end_of_llm_stream\"] = True\n if self.stream:\n await self._handle_llm_output(next_step, text_chunk, should_bypass_synth, meta_info)\n\n if not self.stream:\n meta_info[\"end_of_llm_stream\"]= True\n await self._handle_llm_output(next_step, llm_response, should_bypass_synth, meta_info)\n\n #add to cache\n # if self.cache is not None:\n # self.cache.set(get_md5_hash(message['data']), llm_response)\n\n if self.current_request_id in self.llm_rejected_request_ids:\n logger.info(\"User spoke while LLM was generating response\")\n else:\n self.history.append({\"role\": \"assistant\", \"content\": llm_response})\n\n # TODO : Write a better check for completion prompt \n #answer = await self.tools[\"llm_agent\"].check_for_completion(self.history)\n answer = False\n if answer:\n logger.info(\"Got end of conversation. I'm stopping now\")\n self.conversation_ended = True\n self.ended_by_assistant = True\n await self.tools[\"input\"].stop_handler()\n logger.info(\"Stopped input handler\")\n if \"transcriber\" in self.tools and not self.connected_through_dashboard:\n logger.info(\"Stopping transcriber\")\n await self.tools[\"transcriber\"].toggle_connection()\n await asyncio.sleep(5) # Making sure whatever message was passed is over\n return\n\n self.llm_processed_request_ids.add(self.current_request_id)\n llm_response = \"\"\n\n\n def _extract_sequence_and_meta(self, message):\n sequence, meta_info = None, None\n if isinstance(message, dict) and \"meta_info\" in message:\n self._set_call_details(message)\n sequence = message[\"meta_info\"][\"sequence\"]\n meta_info = message[\"meta_info\"]\n return sequence, meta_info\n\n def _is_extraction_task(self):\n return self.task_config[\"task_type\"] == \"extraction\"\n\n def _is_summarization_task(self):\n return self.task_config[\"task_type\"] == \"summarization\"\n\n def _is_conversation_task(self):\n return self.task_config[\"task_type\"] == \"conversation\"\n\n def _is_preprocessed_flow(self):\n return self.task_config[\"tools_config\"][\"llm_agent\"]['agent_flow_type'] == \"preprocessed\"\n\n def _is_formulaic_flow(self):\n return self.task_config[\"tools_config\"][\"llm_agent\"]['agent_flow_type'] == \"formulaic\"\n\n # This is used only in the case it's a text based chatbot\n async def _listen_llm_input_queue(self):\n logger.info(\n f\"Starting listening to LLM queue as either Connected to dashboard = {self.connected_through_dashboard} or it's a textual chat agent {self.textual_chat_agent}\")\n while True:\n try:\n ws_data_packet = await self.queues[\"llm\"].get()\n logger.info(f\"ws_data_packet {ws_data_packet}\")\n bos_packet = create_ws_data_packet(\"<beginning_of_stream>\", ws_data_packet['meta_info'])\n await self.tools[\"output\"].handle(bos_packet)\n await self._run_llm_task(\n ws_data_packet) # In case s3 is down and it's an audio processing job, this might produce blank message on the frontend of playground.\n eos_packet = create_ws_data_packet(\"<end_of_stream>\", ws_data_packet['meta_info'])\n await self.tools[\"output\"].handle(eos_packet)\n\n except Exception as e:\n traceback.print_exc()\n logger.error(f\"Something went wrong with LLM queue {e}\")\n break\n\n async def _run_llm_task(self, message):\n logger.info(\"running llm based agent\")\n sequence, meta_info 
= self._extract_sequence_and_meta(message)\n\n try:\n if self._is_extraction_task() or self._is_summarization_task():\n await self._process_followup_task(message, sequence, meta_info)\n elif self._is_conversation_task():\n if self._is_preprocessed_flow():\n await self._process_conversation_preprocessed_task(message, sequence, meta_info)\n\n elif self._is_formulaic_flow():\n await self._process_conversation_formulaic_task(message, sequence, meta_info)\n else:\n await self._process_conversation_task(message, sequence, meta_info)\n else:\n logger.error(\"unsupported task type: {}\".format(self.task_config[\"task_type\"]))\n self.llm_task = None\n except Exception as e:\n traceback.print_exc()\n logger.error(f\"Something went wrong in llm: {e}\")\n\n async def process_transcriber_request(self, meta_info):\n if not self.current_request_id or self.current_request_id != meta_info[\"request_id\"]:\n self.previous_request_id, self.current_request_id = self.current_request_id, meta_info[\"request_id\"]\n\n sequence = meta_info[\"sequence\"]\n\n # check if previous request id is not in transmitted request id\n if self.previous_request_id is None:\n is_first_message = True\n elif self.previous_request_id not in self.llm_processed_request_ids:\n self.llm_rejected_request_ids.add(self.previous_request_id)\n else:\n skip_append_to_data = False\n return sequence\n\n async def process_interruption(self):\n await self.tools[\"output\"].handle_interruption()\n self.sequence_ids = set() #Remove all the sequence ids so subsequent won't be processed\n if self.llm_task is not None:\n self.llm_task.cancel()\n self.llm_task = None\n self.was_long_pause = True\n\n # if len(self.synthesizer_tasks) > 0:\n # for synth_task in self.synthesizer_tasks:\n # synth_task.cancel()\n # self.synthesizer_tasks = []\n \n\n ########################\n # Transcriber task\n ########################\n\n async def _handle_transcriber_output(self, next_task, transcriber_message, meta_info):\n if next_task == \"llm\":\n meta_info[\"origin\"] = \"transcriber\"\n self.llm_task = asyncio.create_task(\n self._run_llm_task(create_ws_data_packet(transcriber_message, meta_info)))\n elif next_task == \"synthesizer\":\n self.synthesizer_tasks.append(asyncio.create_task(\n self._synthesize(create_ws_data_packet(transcriber_message, meta_info))))\n else:\n logger.info(f\"Need to separate out output task\")\n\n async def _listen_transcriber(self):\n transcriber_message = \"\"\n start_time = None\n try:\n if self.stream:\n async for message in self.tools[\"transcriber\"].transcribe():\n if message['data'] == \"transcriber_connection_closed\":\n self.transcriber_duration += message['meta_info'][\"transcriber_duration\"]\n logger.info(\"transcriber connection closed\")\n return\n\n self._set_call_details(message)\n meta_info = message[\"meta_info\"]\n sequence = await self.process_transcriber_request(meta_info)\n\n if message['data'] == \"TRANSCRIBER_BEGIN\":\n logger.info(\"starting transcriber stream\")\n start_time = time.time()\n await self.tools[\"output\"].handle_interruption()\n if self.llm_task is not None:\n logger.info(\"Cancelling LLM Task as it's on\")\n self.llm_task.cancel()\n self.llm_task = None\n self.was_long_pause = True\n\n if len(self.synthesizer_tasks) > 0:\n logger.info(\"Cancelling Synthesizer tasks\")\n for synth_task in self.synthesizer_tasks:\n synth_task.cancel()\n self.synthesizer_tasks = []\n continue\n elif message['data'] == \"TRANSCRIBER_END\":\n logger.info(\"transcriber stream and preparing the next step\")\n 
next_task = self._get_next_step(sequence, \"transcriber\")\n logger.info(f'got the next task {next_task}')\n if self.was_long_pause:\n logger.info(\n f\"Seems like there was a long pause {self.history[-1]['content']} , {transcriber_message}\")\n message = self.history[-1]['content'] + \" \" + transcriber_message\n self.history = self.history[:-1]\n self.was_long_pause = False\n\n logger.info(f'invoking next_task {next_task} with transcriber_message: {transcriber_message}')\n await self._handle_transcriber_output(next_task, transcriber_message, meta_info)\n transcriber_message = \"\"\n continue\n else:\n logger.info(\"processed text from transcriber: {}\".format(message['data']))\n transcriber_message += message['data']\n else:\n logger.info(\"Not a streaming conversation. Hence getting a full blown transcript\")\n async for message in self.tools[\"transcriber\"].transcribe():\n logger.info(f\"message from transcriber {message}\")\n sequence = message[\"meta_info\"][\"sequence\"]\n next_task = self._get_next_step(sequence, \"transcriber\")\n self.transcriber_duration += message[\"meta_info\"][\"transcriber_duration\"] if \"transcriber_duration\" in message[\"meta_info\"] else 0\n await self._handle_transcriber_output(next_task, message['data'], message[\"meta_info\"])\n\n except Exception as e:\n traceback.print_exc()\n logger.error(f\"Error in transcriber {e}\")\n\n \n\n async def __listen_synthesizer(self):\n try:\n if self.stream and self.synthesizer_provider != \"polly\":\n logger.info(\"Opening websocket connection to synthesizer\")\n await self.tools[\"synthesizer\"].open_connection()\n\n while True:\n logger.info(\"Listening to synthesizer\")\n async for message in self.tools[\"synthesizer\"].generate():\n if not self.conversation_ended and message[\"meta_info\"][\"sequence_id\"] in self.sequence_ids:\n await self.tools[\"output\"].handle(message)\n \n if \"end_of_synthesizer_stream\" in message[\"meta_info\"] and message[\"meta_info\"][\"end_of_synthesizer_stream\"]:\n logger.info(f\"Got End of stream and hence removing from sequence ids {self.sequence_ids} {message['meta_info']['sequence_id']}\")\n self.sequence_ids.remove(message[\"meta_info\"][\"sequence_id\"])\n await asyncio.sleep(1)\n\n except Exception as e:\n logger.error(f\"Error in synthesizer {e}\")\n\n async def _synthesize(self, message):\n meta_info = message[\"meta_info\"]\n text = message[\"data\"]\n meta_info[\"type\"] = \"audio\"\n try:\n if meta_info[\"is_md5_hash\"]:\n logger.info('sending preprocessed audio response to {}'.format(\n self.task_config[\"tools_config\"][\"output\"][\"provider\"]))\n audio_chunk = await get_raw_audio_bytes_from_base64(self.assistant_name, text,\n self.task_config[\"tools_config\"][\"output\"][\n \"format\"], local=self.is_local,\n user_id=self.user_id,\n assistant_id=self.assistant_id)\n\n #TODO: Either load IVR audio into memory before call or user s3 iter_cunks\n # This will help with interruption in IVR\n for chunk in yield_chunks_from_memory(audio_chunk):\n await self.tools[\"output\"].handle(create_ws_data_packet(audio_chunk, meta_info))\n \n elif self.synthesizer_provider in SUPPORTED_SYNTHESIZER_MODELS.keys():\n self.sequence_ids.add(meta_info[\"sequence_id\"])\n logger.info(f\"After adding into sequence id {self.sequence_ids}\")\n logger.info('sending text to {} for generation: {} '.format(self.synthesizer_provider, text))\n self.synthesizer_characters += len(text)\n await self.tools[\"synthesizer\"].push(message)\n else:\n logger.info(\"other synthesizer models not 
supported yet\")\n except Exception as e:\n logger.error(f\"Error in synthesizer: {e}\")\n\n async def run(self):\n \"\"\"\n Run will start a listener that will continuously listen to the websocket\n - If type is \"audio\": it'll pass it to transcriber\n - Transcriber will pass it deepgram\n - Deepgram will respond\n \n \"\"\"\n try:\n if self.task_id == 0:\n # Create transcriber and synthesizer tasks\n logger.info(\"starting task_id {}\".format(self.task_id))\n tasks = [asyncio.create_task(self.tools['input'].handle())]\n if \"transcriber\" in self.tools:\n tasks.append(asyncio.create_task(self._listen_transcriber()))\n if self.connected_through_dashboard and self.task_config['task_type'] == \"conversation\":\n logger.info(\n \"Since it's connected through dashboard, I'll run listen_llm_tas too in case user wants to simply text\")\n self.llm_queue_task = asyncio.create_task(self._listen_llm_input_queue())\n \n if \"synthesizer\" in self.tools and self._is_conversation_task():\n logger.info(\"Starting synthesizer task\")\n self.synthesizer_task = asyncio.create_task(self.__listen_synthesizer())\n try:\n await asyncio.gather(*tasks)\n except Exception as e:\n logger.error(f\"Error: {e}\")\n\n # Close connections\n # if \"transcriber\" in self.tools:\n # logger.info(f\"Closing transcriber\")\n # await self.tools[\"transcriber\"].toggle_connection()\n # await asyncio.sleep(5) #Making sure whatever message was passed is over\n\n logger.info(\"Conversation completed\")\n else:\n # Run agent followup tasks\n try:\n await self._run_llm_task(self.input_parameters)\n except Exception as e:\n logger.error(f\"Could not do llm call: {e}\")\n raise Exception(e)\n\n except asyncio.CancelledError as e:\n # Cancel all tasks on cancel\n traceback.print_exc()\n self.handle_cancellation(f\"Websocket got cancelled {self.task_id}\")\n\n except Exception as e:\n # Cancel all tasks on error\n self.handle_cancellation(f\"Exception occurred {e}\")\n raise Exception(e)\n\n finally:\n # Construct output\n if self.task_id == 0:\n output = {\"messages\": self.history, \"conversation_time\": time.time() - self.start_time,\n \"label_flow\": self.label_flow, \"call_sid\": self.call_sid, \"stream_sid\": self.stream_sid,\n \"transcriber_duration\": self.transcriber_duration,\n \"synthesizer_characters\": self.synthesizer_characters, \"ended_by_assistant\": self.ended_by_assistant}\n else:\n output = self.input_parameters\n if self.task_config[\"task_type\"] == \"extraction\":\n output = { \"extracted_data\" : self.extracted_data, \"task_type\": \"extraction\"}\n elif self.task_config[\"task_type\"] == \"summarization\":\n output = {\"summary\" : self.summarized_data, \"task_type\": \"summarization\"}\n\n return output\n\n def handle_cancellation(self, message):\n try:\n # Cancel all tasks on cancellation\n tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]\n if self.synthesizer_task:\n self.synthesizer_task.cancel()\n logger.info(f\"tasks {len(tasks)}\")\n for task in tasks:\n logger.info(f\"Cancelling task {task.get_name()}\")\n task.cancel()\n logger.info(message)\n except Exception as e:\n traceback.print_exc()\n logger.info(e)" }, { "identifier": "configure_logger", "path": "bolna/helpers/logger_config.py", "snippet": "def configure_logger(file_name, enabled=True, logging_level='INFO'):\n if logging_level not in VALID_LOGGING_LEVELS:\n logging_level = \"INFO\"\n\n logging.basicConfig(\n level=logging_level,\n format=\"%(asctime)s.%(msecs)03d %(levelname)s {%(module)s} [%(funcName)s] 
%(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n )\n\n logger = logging.getLogger(file_name)\n\n if not enabled:\n logger.disabled = True\n return logger" } ]
import asyncio
import uvloop
import time
import os
import requests
import tiktoken
from twilio.rest import Client
from .base_manager import BaseManager
from .task_manager import TaskManager
from bolna.helpers.logger_config import configure_logger
7,770
logger = configure_logger(__name__)

# Find your Account SID and Auth Token at twilio.com/console
# and set the environment variables. See http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)

enc = tiktoken.get_encoding("cl100k_base")
logger = configure_logger(__name__)

# Find your Account SID and Auth Token at twilio.com/console
# and set the environment variables. See http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)

enc = tiktoken.get_encoding("cl100k_base")
class AssistantManager(BaseManager):
0
2023-12-13 09:07:35+00:00
12k
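The TaskManager snippet above connects the input handler, transcriber, LLM agent and synthesizer through per-stage asyncio queues and long-running tasks, with end-of-stream markers passed between stages. A stripped-down sketch of that queue-driven pipeline shape, using hypothetical stand-ins for the real transcriber/LLM/synthesizer classes:

import asyncio

async def transcriber(audio_q: asyncio.Queue, llm_q: asyncio.Queue):
    # Consume raw audio chunks, emit transcripts downstream.
    while (chunk := await audio_q.get()) is not None:
        await llm_q.put(f"transcript({chunk})")
    await llm_q.put(None)                      # propagate end-of-stream

async def llm(llm_q: asyncio.Queue, synth_q: asyncio.Queue):
    # Consume transcripts, emit generated replies downstream.
    while (text := await llm_q.get()) is not None:
        await synth_q.put(f"reply({text})")
    await synth_q.put(None)

async def synthesizer(synth_q: asyncio.Queue):
    # Consume replies and "speak" them.
    while (reply := await synth_q.get()) is not None:
        print("speak:", reply)

async def main():
    audio_q, llm_q, synth_q = asyncio.Queue(), asyncio.Queue(), asyncio.Queue()
    for chunk in ("a1", "a2", None):           # None marks the end of the call
        await audio_q.put(chunk)
    await asyncio.gather(
        transcriber(audio_q, llm_q), llm(llm_q, synth_q), synthesizer(synth_q)
    )

asyncio.run(main())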
qitan/devops-backend-lite
apps/ucenter/views.py
[ { "identifier": "FEISHU_SYNC_USER_JOB_CACHE_KEY", "path": "common/variables.py", "snippet": "FEISHU_SYNC_USER_JOB_CACHE_KEY = 'celery_job:feishu_user_sync'" }, { "identifier": "Menu", "path": "dbapp/models.py", "snippet": "" }, { "identifier": "CustomModelViewSet", "path": "common/extends/viewsets.py", "snippet": "class CustomModelViewSet(viewsets.ModelViewSet):\n \"\"\"\n A viewset that provides default `create()`, `retrieve()`, `update()`,\n `partial_update()`, `destroy()` and `list()` actions.\n \"\"\"\n\n def get_permission_from_role(self, request):\n try:\n perms = request.user.roles.values(\n 'permissions__method',\n ).distinct()\n return [p['permissions__method'] for p in perms]\n except AttributeError:\n return []\n\n def extend_filter(self, queryset):\n return queryset\n\n def get_queryset(self):\n \"\"\"\n Get the list of items for this view.\n This must be an iterable, and may be a queryset.\n Defaults to using `self.queryset`.\n\n This method should always be used rather than accessing `self.queryset`\n directly, as `self.queryset` gets evaluated only once, and those results\n are cached for all subsequent requests.\n\n You may want to override this if you need to provide different\n querysets depending on the incoming request.\n\n (Eg. return a list of items that is specific to the user)\n \"\"\"\n assert self.queryset is not None, (\n \"'%s' should either include a `queryset` attribute, \"\n \"or override the `get_queryset()` method.\"\n % self.__class__.__name__\n )\n queryset = self.extend_filter(self.queryset)\n if isinstance(queryset, QuerySet):\n # Ensure queryset is re-evaluated on each request.\n queryset = queryset.all()\n return queryset.distinct()\n\n @action(methods=['GET'], url_path='count', detail=False)\n def count(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n return Response({'code': 20000, 'data': queryset.count()})\n\n def create(self, request, *args, **kwargs):\n try:\n request.data['name'] = request.data['name'].strip(\n ' ').replace(' ', '-')\n except BaseException as e:\n print('exception ', str(e))\n serializer = self.get_serializer(data=request.data)\n if not serializer.is_valid():\n return Response({'code': 40000, 'status': 'failed', 'message': serializer.errors})\n try:\n self.perform_create(serializer)\n except BaseException as e:\n return Response({'code': 50000, 'status': 'failed', 'message': str(e)})\n log_audit(request, action_type=self.serializer_class.Meta.model.__name__, action='创建', content='',\n data=serializer.data)\n\n data = {'data': serializer.data, 'status': 'success', 'code': 20000}\n return Response(data)\n\n def list(self, request, pk=None, *args, **kwargs):\n queryset = self.filter_queryset(self.get_queryset())\n page_size = request.query_params.get('page_size')\n pagination.PageNumberPagination.page_size = page_size\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n serializer = self.get_serializer(queryset, many=True)\n data = {'data': {'total': queryset.count(), 'items': serializer.data},\n 'code': 20000, 'status': 'success'}\n return Response(data)\n\n def update(self, request, *args, **kwargs):\n instance = self.get_object()\n partial = kwargs.pop('partial', False)\n try:\n request.data['name'] = request.data['name'].strip(\n ' ').replace(' ', '-')\n except BaseException as e:\n logger.warning(f'不包含name字段: {str(e)}')\n serializer = self.get_serializer(\n instance, data=request.data, 
partial=partial)\n if not serializer.is_valid():\n return Response({'code': 40000, 'status': 'failed', 'message': str(serializer.errors)})\n try:\n self.perform_update(serializer)\n except BaseException as e:\n logger.exception(f'更新失败,原因:{e}')\n return Response({'code': 50000, 'status': 'failed', 'message': str(e)})\n\n if getattr(instance, '_prefetched_objects_cache', None):\n # If 'prefetch_related' has been applied to a queryset, we need to\n # forcibly invalidate the prefetch cache on the instance.\n instance._prefetched_objects_cache = {}\n\n log_audit(request, self.serializer_class.Meta.model.__name__, '更新', content=f\"更新对象:{instance}\",\n data=serializer.data, old_data=self.serializer_class(instance).data)\n\n data = {'data': serializer.data, 'status': 'success', 'code': 20000}\n return Response(data)\n\n def retrieve(self, request, *args, **kwargs):\n instance = self.get_object()\n serializer = self.get_serializer(instance)\n data = {'data': serializer.data, 'code': 20000, 'status': 'success'}\n return Response(data)\n\n def destroy(self, request, *args, **kwargs):\n \"\"\"\n TODO: 删除操作物理删除 or 逻辑删除(增加删除标记字段)\n \"\"\"\n instance = self.get_object()\n try:\n self.perform_destroy(instance)\n except ProtectedError:\n # 存在关联数据,不可删除\n return Response({'code': 50000, 'status': 'failed', 'message': '存在关联数据,禁止删除!'})\n except BaseException as e:\n logger.exception(f'删除数据发生错误 {e}, {e.__class__}')\n return Response({'code': 50000, 'status': 'failed', 'message': f'删除异常: {str(e)}'})\n log_audit(request, self.serializer_class.Meta.model.__name__,\n '删除', content=f\"删除对象:{instance}\")\n\n return Response({'code': 20000, 'status': 'success', 'msg': ''})" }, { "identifier": "CustomModelParentViewSet", "path": "common/extends/viewsets.py", "snippet": "class CustomModelParentViewSet(CustomModelViewSet):\n\n def get_queryset(self):\n assert self.queryset is not None, (\n \"'%s' should either include a `queryset` attribute, \"\n \"or override the `get_queryset()` method.\"\n % self.__class__.__name__\n )\n queryset = self.extend_filter(self.queryset)\n if self.action == 'list':\n if not self.request.query_params.get('search'):\n queryset = queryset.filter(parent__isnull=True)\n if isinstance(queryset, QuerySet):\n queryset = queryset.all()\n return queryset.distinct()" }, { "identifier": "RbacPermission", "path": "common/extends/permissions.py", "snippet": "class RbacPermission(BasePermission):\n \"\"\"\n 自定义权限\n \"\"\"\n\n @classmethod\n def check_is_admin(cls, request):\n return request.user.is_authenticated and request.user.roles.filter(name='管理员').count() > 0\n\n @classmethod\n def get_permission_from_role(cls, request):\n try:\n perms = request.user.roles.values(\n 'permissions__method',\n ).distinct()\n return [p['permissions__method'] for p in perms]\n except AttributeError:\n return []\n\n def _has_permission(self, request, view):\n \"\"\"\n :return:\n \"\"\"\n _method = request._request.method.lower()\n platform = get_redis_data('platform')\n url_whitelist = platform['whitelist'] if platform else []\n url_whitelist.extend(\n [{'url': '/api/login/feishu/'}, {'url': '/api/login/gitlab/'}])\n path_info = request.path_info\n for item in url_whitelist:\n url = item['url']\n if url in path_info:\n logger.debug(f'请求地址 {path_info} 命中白名单 {url}, 放行')\n return True\n\n from_workflow = 'from_workflow' in request.GET\n if _method == 'get' and from_workflow:\n return True\n\n is_superuser = request.user.is_superuser\n if is_superuser:\n return True\n\n is_admin = RbacPermission.check_is_admin(request)\n 
perms = self.get_permission_from_role(request)\n if not is_admin and not perms:\n logger.debug(f'用户 {request.user} 不是管理员 且 权限列表为空, 直接拒绝')\n return False\n\n perms_map = view.perms_map\n\n action = view.action\n _custom_method = f'{_method}_{action}'\n for i in perms_map:\n for method, alias in i.items():\n if is_admin and (method == '*' and alias[0] == 'admin'):\n return True\n if method == '*' and alias[0] in perms:\n return True\n if _custom_method and alias[0] in perms and (_custom_method == method or method == f'*_{action}'):\n return True\n if _method == method and alias[0] in perms:\n return True\n return False\n\n def has_permission(self, request, view):\n res = self._has_permission(request, view)\n # 记录权限异常的操作\n if not res:\n AuditLog.objects.create(\n user=request.user, type='', action='拒绝操作',\n action_ip=user_ip(request),\n content=f\"请求方法:{request.method},请求路径:{request.path},UserAgent:{request.META['HTTP_USER_AGENT']}\",\n data='',\n old_data=''\n )\n return res" }, { "identifier": "CustomInvalidToken", "path": "common/extends/JwtAuth.py", "snippet": "class CustomInvalidToken(InvalidToken):\n status_code = status.HTTP_401_UNAUTHORIZED\n default_detail = 'Token不合法或者已经过期.'\n default_code = 40100" }, { "identifier": "TokenObtainPairSerializer", "path": "common/extends/JwtAuth.py", "snippet": "class TokenObtainPairSerializer(BaseTokenObtainPairSerializer):\n\n default_error_messages = {\n \"no_active_account\": \"用户名或者密码错误!\"\n }\n\n @classmethod\n def get_token(cls, user):\n token = RefreshToken.for_user(user)\n return token" }, { "identifier": "TokenRefreshSerializer", "path": "common/extends/JwtAuth.py", "snippet": "class TokenRefreshSerializer(BaseTokenRefreshSerializer):\n\n def validate(self, attrs):\n refresh = RefreshToken(attrs['refresh'])\n data = {'access': str(refresh.access_token)}\n\n if api_settings.ROTATE_REFRESH_TOKENS:\n if api_settings.BLACKLIST_AFTER_ROTATION:\n try:\n # Attempt to blacklist the given refresh token\n refresh.blacklist()\n except AttributeError:\n # If blacklist app not installed, `blacklist` method will\n # not be present\n pass\n\n refresh.set_jti()\n refresh.set_exp()\n\n data['refresh'] = str(refresh)\n\n return data" }, { "identifier": "log_audit", "path": "common/extends/handler.py", "snippet": "def log_audit(request, action_type, action, content=None, data=None, old_data=None, user=None):\n if user is None:\n user = request.user.first_name or request.user.username\n\n AuditLog.objects.create(user=user, type=action_type, action=action,\n action_ip=user_ip(request),\n content=f\"{mask_sensitive_data(content)}\\n请求方法:{request.method},请求路径:{request.path},UserAgent:{request.META['HTTP_USER_AGENT']}\",\n data=mask_sensitive_data(data),\n old_data=mask_sensitive_data(old_data))" }, { "identifier": "AuditLogFilter", "path": "common/extends/filters.py", "snippet": "class AuditLogFilter(FilterSet):\n exclude = ExcludeFilter(field_name='type', lookup_expr='in', exclude=True)\n type = CharFilter(field_name='type')\n\n class Meta:\n models = AuditLog\n fields = ['type', 'exclude']" }, { "identifier": "CustomSearchFilter", "path": "common/extends/filters.py", "snippet": "class CustomSearchFilter(SearchFilter):\n\n def get_search_fields(self, view, request):\n \"\"\"\n Search fields are obtained from the view, but the request is always\n passed to this method. 
Sub-classes can override this method to\n dynamically change the search fields based on request content.\n \"\"\"\n if hasattr(view, 'get_search_fields'):\n return view.get_search_fields()\n return getattr(view, 'search_fields', None)\n\n def get_search_terms(self, request):\n \"\"\"\n Search terms are set by a ?search=... query parameter,\n and may be comma and/or whitespace delimited.\n \"\"\"\n params = request.query_params.get(self.search_param, '')\n params = params.replace('\\x00', '') # strip null characters\n values = params.strip('+').split('+')\n if len(values) > 1:\n return values, 1\n params = params.replace(',', ' ')\n params = params.replace('|', ' ')\n return params.split(), 0\n\n def filter_queryset(self, request, queryset, view):\n search_fields = self.get_search_fields(view, request)\n search_param = self.get_search_terms(request)\n search_terms = search_param[0]\n search_condition = search_param[1]\n if not search_fields or not search_terms:\n return queryset\n\n orm_lookups = [\n self.construct_search(str(search_field))\n for search_field in search_fields\n ]\n\n base = queryset\n conditions = []\n for search_term in search_terms:\n queries = [\n models.Q(**{orm_lookup: search_term.strip()})\n for orm_lookup in orm_lookups\n ]\n conditions.append(reduce(operator.or_, queries))\n if search_condition == 1:\n queryset = queryset.filter(reduce(operator.and_, conditions))\n else:\n queryset = queryset.filter(reduce(operator.or_, conditions))\n\n if self.must_call_distinct(queryset, search_fields):\n # Filtering against a many-to-many field requires us to\n # call queryset.distinct() in order to avoid duplicate items\n # in the resulting queryset.\n # We try to avoid this if possible, for performance reasons.\n queryset = distinct(queryset, base)\n return queryset" }, { "identifier": "GlueJenkins", "path": "common/utils/JenkinsAPI.py", "snippet": "class GlueJenkins(Jenkins):\n\n def __init__(self, url=None, username=None, password=None):\n self.__url = url\n self.__username = username\n self.__password = password\n super(GlueJenkins, self).__init__(\n self.__url, self.__username, self.__password)\n\n def _get_encoded_params(self, params):\n for k, v in params.items():\n if k in [\"name\", \"msg\", \"short_name\", \"from_short_name\",\n \"to_short_name\", \"folder_url\", \"from_folder_url\", \"to_folder_url\"]:\n params[k] = quote(v.encode('utf8'))\n return params\n\n def _build_url(self, format_spec, variables=None):\n\n if variables:\n url_path = format_spec % self._get_encoded_params(variables)\n else:\n url_path = format_spec\n return str(urljoin(self.server, url_path))\n\n def assert_credential_exists(self, name, folder_name=None, domain_name='_',\n exception_message='credential[%s] does not exist.'):\n '''Raise an exception if credential does not exist in domain of folder\n\n :param name: Name of credential, ``str``\n :param folder_name: Folder name, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :param exception_message: Message to use for the exception.\n Formatted with ``name``, ``domain_name``,\n and ``folder_name``\n :throws: :class:`JenkinsException` whenever the credentail\n does not exist in domain of folder\n '''\n if not self.credential_exists(name, folder_name, domain_name):\n raise JenkinsException(exception_message\n % name)\n\n def get_credential_global_config(self, name, domain_name='_'):\n '''Get configuration of credential in domain of folder.\n :param name: Name of credentail, ``str``\n :param domain_name: Domain name, default is 
'_', ``str``\n :returns: Credential configuration (XML format)\n '''\n return self.jenkins_open(requests.Request(\n 'GET', self._build_url(CONFIG_CREDENTIAL_GLOBAL, locals())\n ))\n\n def get_credential_info(self, name, folder_name=None, domain_name='_'):\n '''Get credential information dictionary in domain of folder\n\n :param name: Name of credentail, ``str``\n :param folder_name: folder_name, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :returns: Dictionary of credential info, ``dict``\n '''\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(CREDENTIAL_INFO_GLOBAL, locals())\n ))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('credential[%s] does not exist.' % name)\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('credential[%s] does not exist.' % name)\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for credential[%s].' % name\n )\n\n def credential_exists(self, name, folder_name=None, domain_name='_'):\n '''Check whether a credentail exists in domain of folder\n\n :param name: Name of credentail, ``str``\n :param folder_name: Folder name, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :returns: ``True`` if credentail exists, ``False`` otherwise\n '''\n try:\n return self.get_credential_info(name)['id'] == name\n except JenkinsException:\n return False\n\n def create_credential_global(self, name=None, user=None, password=None, secret=None, comment=None, domain_name='_'):\n '''Create credentail in domain of folder\n\n :param name: username\n :param password: password\n :param comment: comment, ``str``\n :param config_xml: New XML configuration, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n '''\n st = shortuuid.ShortUUID()\n st.set_alphabet(\n f\"0123456789{''.join([chr(i) for i in range(ord('a'), ord('z') + 1)])}\")\n if name is None:\n name = '-'.join(['api', st.random(length=8),\n st.random(length=4), st.random(length=12)])\n config_xml = '''<com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl>\n <scope>GLOBAL</scope>\n <id>%s</id>\n <description>[%s] Created by DevOps Platform</description>\n <username>%s</username>\n <password>%s</password>\n</com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl>''' % (name, comment, user, password)\n if user is None:\n config_xml = '''<org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl>\n <scope>GLOBAL</scope>\n <id>%s</id>\n <description>[%s] Created by DevOps Platform</description>\n <secret>%s</secret>\n</org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl>''' % (name, comment, secret)\n if self.credential_exists(name):\n raise JenkinsException('credential[%s] already exists.' 
% name)\n\n self.jenkins_open(requests.Request(\n 'POST', self._build_url(CREATE_CREDENTIAL_GLOBAL, locals()),\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n self.assert_credential_exists(\n name, exception_message='create credential[%s] failed.')\n return {'status': 0, 'data': name}\n\n def reconfig_credential_global(self, name, user=None, password=None, secret=None, comment=None, domain_name='_'):\n \"\"\"\n Reconfig credential with new config in domain of folder\n :param name: name, ``str``\n :param user:\n :param password:\n :param secret:\n :param comment:\n :param domain_name: Domain name, default is '_', ``str``\n :return:\n \"\"\"\n reconfig_url = self._build_url(CONFIG_CREDENTIAL_GLOBAL, locals())\n config_xml = self.get_credential_global_config(name)\n xml_dict = xmltodict.parse(config_xml)\n if user is None:\n xml_dict['org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl']['secret'] = secret\n if comment:\n xml_dict['org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl']['description'] = comment\n else:\n xml_dict['com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl']['username'] = user\n xml_dict['com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl']['password'] = password\n if comment:\n xml_dict['com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl'][\n 'description'] = comment\n config_xml = xmltodict.unparse(xml_dict, pretty=True)\n self.jenkins_open(requests.Request(\n 'POST', reconfig_url,\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n\n def create_job(self, name, config_xml):\n '''Create a new Jenkins job\n\n :param name: Name of Jenkins job, ``str``\n :param config_xml: config file text, ``str``\n '''\n folder_url, short_name = self._get_job_folder(name)\n if self.job_exists(name):\n raise JenkinsException('job[%s] already exists' % (name))\n\n try:\n self.jenkins_open(requests.Request(\n 'POST', self._build_url(CREATE_JOB, locals()),\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n except NotFoundException:\n raise JenkinsException('Cannot create job[%s] because folder '\n 'for the job does not exist' % (name))\n self.assert_job_exists(name, 'create[%s] failed')\n\n def reconfig_job(self, name, config_xml):\n '''Change configuration of existing Jenkins job.\n\n To create a new job, see :meth:`Jenkins.create_job`.\n\n :param name: Name of Jenkins job, ``str``\n :param config_xml: New XML configuration, ``str``\n '''\n folder_url, short_name = self._get_job_folder(name)\n reconfig_url = self._build_url(CONFIG_JOB, locals())\n self.jenkins_open(requests.Request(\n 'POST', reconfig_url,\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n\n def get_stage_describe(self, name, number, node_number):\n \"\"\" 获取 单个stage 详情 \"\"\"\n folder_url, short_name = self._get_job_folder(name)\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(STAGE_DES, locals())\n ))\n\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (name, number)\n )\n\n def get_stage_logs(self, name, number, node_number):\n \"\"\" 获取 stage 执行日志\"\"\"\n folder_url, short_name = self._get_job_folder(name)\n try:\n 
response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(STAGE_LOG, locals())\n ))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (name, number)\n )\n\n def get_stage_info(self, name, number, depth=0):\n\n folder_url, short_name = self._get_job_folder(name)\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(STAGE_INFO, locals())\n ))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (name, number)\n )\n\n def get_flow_detail(self, job_name, build_number):\n stage_data = self.get_stage_info(name=job_name, number=build_number)\n stages = stage_data.get('stages')\n for i in stages:\n logs = ''\n try:\n # 获取stage返回信息\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(\n unquote(i['_links']['self']['href']), locals())\n ))\n if response:\n res = json.loads(response)\n for j in res['stageFlowNodes']:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(\n unquote(j['_links']['log']['href']), locals())\n ))\n res = json.loads(response)\n try:\n # 移除href html信息,保留链接文字\n import re\n pat = re.compile('<a href[^>]*>')\n logs = logs + '\\n' + \\\n pat.sub('', res['text'].replace('</a>', ''))\n except:\n pass\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (job_name, build_number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (job_name, build_number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (job_name, build_number)\n )\n\n stage_data[\"stages\"][stages.index(i)]['logs'] = logs\n return stage_data\n\n def get_queue_item(self, number, depth=0):\n '''Get information about a queued item (to-be-created job).\n\n The returned dict will have a \"why\" key if the queued item is still\n waiting for an executor.\n\n The returned dict will have an \"executable\" key if the queued item is\n running on an executor, or has completed running. Use this to\n determine the job number / URL.\n\n :param name: queue number, ``int``\n :returns: dictionary of queued information, ``dict``\n '''\n url = self._build_url(Q_ITEM, locals())\n try:\n response = self.jenkins_open(requests.Request('GET', url))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('queue number[%d] does not exist'\n % number)\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('queue number[%d] does not exist' % number)\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for queue number[%d]' % number\n )\n\n def build_job(self, name, parameters=None, token=None):\n '''Trigger build job.\n\n This method returns a queue item number that you can pass to\n :meth:`Jenkins.get_queue_item`. 
Note that this queue number is only\n valid for about five minutes after the job completes, so you should\n get/poll the queue information as soon as possible to determine the\n job's URL.\n\n :param name: name of job\n :param parameters: parameters for job, or ``None``, ``dict``\n :param token: Jenkins API token\n :returns: ``int`` queue item\n '''\n response = self.jenkins_request(requests.Request(\n 'POST', self.build_job_url(name, parameters, token)))\n\n if 'Location' not in response.headers:\n raise EmptyResponseException(\n \"Header 'Location' not found in \"\n \"response from server[%s]\" % self.server)\n\n location = response.headers['Location']\n if location.endswith('/'):\n location = location[:-1]\n parts = location.split('/')\n number = int(parts[-1])\n return number\n\n def get_job_config(self, name):\n '''Get configuration of existing Jenkins job.\n\n :param name: Name of Jenkins job, ``str``\n :returns: job configuration (XML format)\n '''\n folder_url, short_name = self._get_job_folder(name)\n request = requests.Request(\n 'GET', self._build_url(CONFIG_JOB, locals()))\n return self.jenkins_open(request)\n\n def get_job_info(self, name, depth=0, fetch_all_builds=False):\n '''Get job information dictionary.\n\n :param name: Job name, ``str``\n :param depth: JSON depth, ``int``\n :param fetch_all_builds: If true, all builds will be retrieved\n from Jenkins. Otherwise, Jenkins will\n only return the most recent 100\n builds. This comes at the expense of\n an additional API call which may\n return significant amounts of\n data. ``bool``\n :returns: dictionary of job information\n '''\n folder_url, short_name = self._get_job_folder(name)\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(JOB_INFO, locals())\n ))\n if response:\n if fetch_all_builds:\n return self._add_missing_builds(json.loads(response))\n else:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] does not exist' % name)\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] does not exist' % name)\n except ValueError:\n raise JenkinsException(\n \"Could not parse JSON info for job[%s]\" % name)" }, { "identifier": "user_ip", "path": "common/get_ip.py", "snippet": "def user_ip(request):\n \"\"\"\n 获取用户真实IP\n :param request:\n :return:\n \"\"\"\n if 'X-Real-IP' in request.META:\n return request.META['X-Real-IP']\n if 'HTTP_X_FORWARDED_FOR' in request.META:\n return request.META['HTTP_X_FORWARDED_FOR'].split(',')[0]\n if 'REMOTE_ADDR' in request.META:\n return request.META['REMOTE_ADDR'].split(',')[0]" }, { "identifier": "ThirdPartyUser", "path": "common/ext_fun.py", "snippet": "class ThirdPartyUser(object):\n\n def get_user(self):\n user = UserProfile.objects.get_or_create(username='thirdparty')[0]\n self.set_permission(user, self.get_role())\n return user\n\n def get_role(self):\n return Role.objects.get_or_create(name='thirdparty')[0]\n\n def get_perm(self):\n return Permission.objects.get_or_create(name='Jenkins回调', method='jenkins_callback')[0]\n\n def set_permission(self, user, role):\n role.permissions.set([self.get_perm().id])\n user.roles.set([role.id])" }, { "identifier": "set_redis_data", "path": "common/ext_fun.py", "snippet": "def set_redis_data(name, config):\n cache.set(f\"system:{name}\", config, None)" }, { "identifier": "get_redis_data", "path": "common/ext_fun.py", "snippet": "def get_redis_data(name):\n ret = cache.get(f\"system:{name}\")\n if not ret:\n try:\n if name == 'cicd-harbor':\n qs = 
SystemConfig.objects.filter(type=name)[0]\n else:\n qs = SystemConfig.objects.get(name=name)\n except BaseException as e:\n return None\n ret = json.loads(qs.config)\n set_redis_data(name, ret)\n\n return ret" }, { "identifier": "timeline_generate", "path": "common/ext_fun.py", "snippet": "def timeline_generate(time_range, format_type='dashboard'):\n \"\"\"\n 根据起始时间生成时间线\n\n : params format_type: 默认为dashboard, 用于概览报表粗略显示, 其它用于监控类的展示则使用更细粒度的格式\n \"\"\"\n TIME_FREQNAMES = DASHBOARD_TIME_FREQNAMES\n TIME_FORMAT = DASHBOARD_TIME_FORMAT\n if format_type == 'cmdb':\n TIME_FREQNAMES = DASHBOARD_TIME_FREQNAMES_T\n TIME_FORMAT = DASHBOARD_TIME_FORMAT_T\n start_time = time_range['start_time']\n end_time = time_range['end_time']\n time_line = rrule(\n freq=TIME_FREQNAMES[time_range['name']], dtstart=start_time, until=end_time)\n return [i.strftime(TIME_FORMAT[time_range['name']]) for i in time_line]" }, { "identifier": "time_period", "path": "common/ext_fun.py", "snippet": "def time_period(time_range='6-months', type_range='static', time_zone='Asia/Shanghai', name=None):\n \"\"\"\n 根据时间范围生成起止时间\n \"\"\"\n start_time = None\n end_time = timezone.now().astimezone(pytz.timezone(time_zone))\n if type_range == 'dynamic' and name is None:\n start_time = datetime.strptime(time_range[0], '%Y-%m-%d %H:%M:%S')\n end_time = datetime.strptime(time_range[1], '%Y-%m-%d %H:%M:%S')\n if start_time > end_time:\n start_time, end_time = end_time, start_time\n if (end_time - start_time).days >= 60:\n name = 'months'\n elif (end_time - start_time).days >= 2:\n name = 'days'\n elif (end_time - start_time).days >= 1 or (end_time - start_time).seconds > 60 * 60:\n name = 'hours'\n else:\n name = 'minutes'\n return {'name': name, 'start_time': start_time, 'end_time': end_time}\n\n if type_range == 'static':\n _time = time_range.split('-')\n if _time[-1] == 'week':\n start_time = end_time - relativedelta(days=end_time.weekday(), hours=end_time.hour, minutes=end_time.minute,\n seconds=end_time.second,\n microseconds=end_time.microsecond)\n return {'name': 'days', 'start_time': start_time, 'end_time': end_time}\n if _time[-1] == 'lastweek':\n start_time = end_time - relativedelta(days=end_time.weekday() + 7, hours=end_time.hour,\n minutes=end_time.minute, seconds=end_time.second,\n microseconds=end_time.microsecond)\n end_time = end_time - relativedelta(days=end_time.weekday(), hours=end_time.hour, minutes=end_time.minute,\n seconds=end_time.second, microseconds=end_time.microsecond)\n return {'name': 'days', 'start_time': start_time, 'end_time': end_time}\n if _time[-1] in ['today', 'yesterday']:\n start_time = end_time - relativedelta(hours=end_time.hour, minutes=end_time.minute, seconds=end_time.second,\n microseconds=end_time.microsecond)\n if _time[-1] == 'yesterday':\n end_time = start_time\n start_time = end_time - relativedelta(days=1)\n return {'name': 'hours', 'start_time': start_time, 'end_time': end_time}\n name = _time[1]\n if name is None:\n if _time[1] in ['years', 'months']:\n name = 'months'\n if _time[1] == 'months' and int(_time[0]) < 2:\n name = 'days'\n if _time[1] == 'days' and int(_time[0]) < 2:\n name = 'hours'\n start_time = end_time + relativedelta(**{_time[1]: -int(_time[0])})\n return {'name': name, 'start_time': start_time, 'end_time': end_time}" }, { "identifier": "node_filter", "path": "common/ext_fun.py", "snippet": "def node_filter(node_id, data):\n \"\"\"\n 查找节点\n\n :params: node_id int 节点ID\n :params: data list 节点数组\n \"\"\"\n for i in data:\n if i['id'] == node_id:\n print('get node', i)\n 
return i\n else:\n if i.get('children', None):\n node = node_filter(node_id, i['children'])\n if isinstance(node, (dict,)):\n return node" }, { "identifier": "test_notify", "path": "qtasks/tasks.py", "snippet": "def test_notify(receiver, notify_type='mail', robot_name=None, robot_webhook=None, robot_key=None,\n robot_type='dingtalk'):\n ret = None\n if notify_type == 'mail':\n mail_send = OmsMail()\n ret = mail_send.test_notify(receiver)\n if notify_type == 'robot':\n robot_notify = ROBOT_CATEGORIES[robot_type](robot_webhook, robot_key)\n ret = robot_notify.test_notify(receiver, robot_name)\n\n return ret" } ]
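The Jenkins wrapper methods quoted above describe a two-step trigger flow: build_job returns a queue item number parsed from the Location header, and get_queue_item resolves that number to a concrete build once an 'executable' key appears. A minimal polling sketch of that flow follows; wait_for_build is a hypothetical helper, and 'server' stands for an already-configured client (e.g. the GlueJenkins wrapper imported in the next field), whose construction is omitted because its signature is not shown here.

import time

def wait_for_build(server, job_name, parameters=None, attempts=30, delay=2):
    # build_job returns the queue item number (the docstring above notes it is short-lived).
    queue_no = server.build_job(job_name, parameters=parameters)
    for _ in range(attempts):
        item = server.get_queue_item(queue_no)
        if 'executable' in item:          # present once the job is running or has finished
            return item['executable'].get('number'), item['executable'].get('url')
        time.sleep(delay)                 # still queued; the 'why' key explains the wait
    return None, None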
import hashlib import django_filters import datetime import time import shortuuid import json import logging from django.core.cache import cache from rest_framework import viewsets, status from rest_framework.views import APIView from rest_framework.response import Response from rest_framework.decorators import action from rest_framework import pagination from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView from rest_framework_simplejwt.exceptions import TokenError, InvalidToken from rest_framework_simplejwt.authentication import JWTAuthentication from rest_framework_simplejwt.tokens import RefreshToken, Token, OutstandingToken from rest_framework.filters import SearchFilter, OrderingFilter from django_q.tasks import async_task, result from django.contrib.auth.models import update_last_login from django.db.models import Q from django.contrib.auth import logout from common.variables import FEISHU_SYNC_USER_JOB_CACHE_KEY from dbapp.models import Menu, Permission, Role, Organization, UserProfile, AuditLog, SystemConfig, DataDict from ucenter.serializers import MenuSerializers, MenuListSerializers, PermissionListSerializers, PermissionSerializers, \ RoleListSerializers, \ RoleSerializers, OrganizationSerializers, \ UserProfileListSerializers, UserProfileSerializers, UserProfileDetailSerializers, AuditLogSerializers, \ AuditLogActivitySerializers, SystemConfigSerializers, \ SystemConfigListSerializers, DataDictSerializers from common.extends.viewsets import CustomModelViewSet, CustomModelParentViewSet from common.extends.permissions import RbacPermission from common.extends.JwtAuth import CustomInvalidToken, TokenObtainPairSerializer, TokenRefreshSerializer from common.extends.handler import log_audit from common.extends.filters import AuditLogFilter, CustomSearchFilter from common.utils.JenkinsAPI import GlueJenkins from common.get_ip import user_ip from common.ext_fun import ThirdPartyUser, set_redis_data, get_redis_data, timeline_generate, time_period, \ node_filter from qtasks.tasks import test_notify from django.conf import settings from django.contrib.auth import login, REDIRECT_FIELD_NAME from django.views.decorators.csrf import csrf_exempt, csrf_protect from django.views.decorators.cache import never_cache
9,520
#!/usr/bin/env python # -*- coding: utf-8 -*- """ @Author : Charles Lai @Contact : [email protected] @Time : 2020/9/15 4:08 PM @FileName: views.py @Blog :https://imaojia.com """ logger = logging.getLogger('drf') DEFAULT_SESSION_TIMEOUT = None
#!/usr/bin/env python # -*- coding: utf-8 -*- """ @Author : Charles Lai @Contact : [email protected] @Time : 2020/9/15 4:08 PM @FileName: views.py @Blog :https://imaojia.com """ logger = logging.getLogger('drf') DEFAULT_SESSION_TIMEOUT = None
class DataDictViewSet(CustomModelParentViewSet):
3
2023-12-13 03:09:32+00:00
12k
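The time_period and timeline_generate helpers in this row's context pair up: the first turns a static range spec such as '6-months' into a {'name', 'start_time', 'end_time'} dict, and the second expands that dict into label strings. A small usage sketch, assuming it runs inside this project so common.ext_fun (with its Django settings and the DASHBOARD_* lookup tables it relies on) is importable:

from common.ext_fun import time_period, timeline_generate

# Static ranges are parsed as "<count>-<unit>"; for '6-months' the helper picks the
# 'months' granularity and computes start/end datetimes around now.
time_range = time_period('6-months', type_range='static')

# Expand the range into dashboard-style labels, one per step of the chosen granularity.
labels = timeline_generate(time_range, format_type='dashboard')
print(time_range['name'], labels)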
AdaCheng/EgoThink
models/instruct_blip/models/blip2_models/blip2_t5_instruct.py
[ { "identifier": "registry", "path": "models/instruct_blip/common/registry.py", "snippet": "class Registry:\n def register_model(cls, name):\n def wrap(model_cls):\n def register_processor(cls, name):\n def wrap(processor_cls):\n def register_lr_scheduler(cls, name):\n def wrap(lr_sched_cls):\n def register_runner(cls, name):\n def wrap(runner_cls):\n def register_path(cls, name, path):\n def register(cls, name, obj):\n def get_builder_class(cls, name):\n def get_model_class(cls, name):\n def get_task_class(cls, name):\n def get_processor_class(cls, name):\n def get_lr_scheduler_class(cls, name):\n def get_runner_class(cls, name):\n def list_runners(cls):\n def list_models(cls):\n def list_tasks(cls):\n def list_processors(cls):\n def list_lr_schedulers(cls):\n def list_datasets(cls):\n def get_path(cls, name):\n def get(cls, name, default=None, no_warning=False):\n def unregister(cls, name):" }, { "identifier": "Blip2Base", "path": "models/instruct_blip/models/blip2_models/blip2.py", "snippet": "class Blip2Base(BaseModel):\n @classmethod\n def init_tokenizer(cls, truncation_side=\"right\"):\n tokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\", truncation_side=truncation_side)\n tokenizer.add_special_tokens({\"bos_token\": \"[DEC]\"})\n return tokenizer\n\n def maybe_autocast(self, dtype=torch.float16):\n # if on cpu, don't use autocast\n # if on gpu, use autocast with dtype if provided, otherwise use torch.float16\n enable_autocast = self.device != torch.device(\"cpu\")\n\n if enable_autocast:\n return torch.cuda.amp.autocast(dtype=dtype)\n else:\n return contextlib.nullcontext()\n\n @classmethod\n def init_Qformer(cls, num_query_token, vision_width, cross_attention_freq=2):\n encoder_config = BertConfig.from_pretrained(\"bert-base-uncased\")\n encoder_config.encoder_width = vision_width\n # insert cross-attention layer every other block\n encoder_config.add_cross_attention = True\n encoder_config.cross_attention_freq = cross_attention_freq\n encoder_config.query_length = num_query_token\n Qformer = BertLMHeadModel.from_pretrained(\n \"bert-base-uncased\", config=encoder_config\n )\n query_tokens = nn.Parameter(\n torch.zeros(1, num_query_token, encoder_config.hidden_size)\n )\n query_tokens.data.normal_(mean=0.0, std=encoder_config.initializer_range)\n return Qformer, query_tokens\n\n def init_vision_encoder(\n self, model_name, img_size, drop_path_rate, use_grad_checkpoint, precision\n ):\n assert model_name in [\n \"eva_clip_g\",\n \"eva2_clip_L\",\n \"clip_L\",\n ], \"vit model must be eva_clip_g, eva2_clip_L or clip_L\"\n if model_name == \"eva_clip_g\":\n visual_encoder = create_eva_vit_g(\n img_size, drop_path_rate, use_grad_checkpoint, precision\n )\n# elif model_name == \"eva2_clip_L\":\n# visual_encoder = create_eva2_vit_L(\n# img_size, drop_path_rate, use_grad_checkpoint, precision\n# )\n elif model_name == \"clip_L\":\n visual_encoder = create_clip_vit_L(img_size, use_grad_checkpoint, precision)\n ln_vision = LayerNorm(visual_encoder.num_features)\n self.vit_name = model_name\n return visual_encoder, ln_vision\n\n def load_from_pretrained(self, url_or_filename):\n if is_url(url_or_filename):\n cached_file = download_cached_file(\n url_or_filename, check_hash=False, progress=True\n )\n checkpoint = torch.load(cached_file, map_location=\"cpu\")\n elif os.path.isfile(url_or_filename):\n checkpoint = torch.load(url_or_filename, map_location=\"cpu\")\n else:\n raise RuntimeError(\"checkpoint url or path is invalid\")\n\n state_dict = checkpoint[\"model\"]\n\n msg = 
self.load_state_dict(state_dict, strict=False)\n\n # logging.info(\"Missing keys {}\".format(msg.missing_keys))\n logging.info(\"load checkpoint from %s\" % url_or_filename)\n\n return msg\n\n def get_optimizer_params(self, weight_decay, lr_scale=1):\n if self.vit_name == \"eva_clip_g\":\n vit_num_layers = self.visual_encoder.get_num_layer()\n lr_scales = list(lr_scale ** (vit_num_layers + 1 - i) for i in range(vit_num_layers + 2))\n\n parameter_group_names = {}\n parameter_group_vars = {}\n\n for name, param in self.named_parameters():\n if not param.requires_grad:\n continue # frozen weights\n if len(param.shape) == 1 or name.endswith(\".bias\"):\n group_name = \"no_decay\"\n this_weight_decay = 0.\n else:\n group_name = \"decay\"\n this_weight_decay = weight_decay\n if 'visual_encoder' in name:\n layer_id = self.visual_encoder.get_num_layer(name.replace('visual_encoder.',''))\n group_name = \"vit_layer_%d_%s\" % (layer_id, group_name)\n else:\n layer_id = None\n\n if group_name not in parameter_group_names:\n if layer_id is not None:\n scale = lr_scales[layer_id]\n else:\n scale = 1\n parameter_group_names[group_name] = {\n \"weight_decay\": this_weight_decay,\n \"params\": [],\n \"lr_scale\": scale\n }\n parameter_group_vars[group_name] = {\n \"weight_decay\": this_weight_decay,\n \"params\": [],\n \"lr_scale\": scale\n }\n parameter_group_vars[group_name][\"params\"].append(param)\n parameter_group_names[group_name][\"params\"].append(name)\n # import json\n # print(\"Param groups = %s\" % json.dumps(parameter_group_names, indent=2))\n optim_params = list(parameter_group_vars.values())\n return optim_params\n else:\n return super().get_optimizer_params(weight_decay,lr_scale)\n\n def _lemmatize(self, answers):\n def apply(answer):\n doc = self.lemmatizer(answer)\n\n words = []\n for token in doc:\n if token.pos_ in [\"NOUN\", \"VERB\"]:\n words.append(token.lemma_)\n else:\n words.append(token.text)\n answer = \" \".join(words)\n\n return answer\n\n return [apply(answer) for answer in answers]\n\n @property\n def lemmatizer(self):\n if self._lemmatizer is None:\n try:\n import spacy\n\n self._lemmatizer = spacy.load(\"en_core_web_sm\")\n except ImportError:\n logging.error(\n \"\"\"\n Please install spacy and en_core_web_sm model to apply lemmatization.\n python -m spacy download en_core_web_sm\n OR\n import spacy.cli\n spacy.cli.download(\"en_core_web_sm\")\n \"\"\"\n )\n exit(1)\n\n return self._lemmatizer" }, { "identifier": "disabled_train", "path": "models/instruct_blip/models/blip2_models/blip2.py", "snippet": "def disabled_train(self, mode=True):\n \"\"\"Overwrite model.train with this function to make sure train/eval mode\n does not change anymore.\"\"\"\n return self" }, { "identifier": "T5Config", "path": "models/instruct_blip/models/blip2_models/modeling_t5.py", "snippet": "_CONFIG_FOR_DOC = \"T5Config\"\n_TOKENIZER_FOR_DOC = \"T5Tokenizer\"\n_CHECKPOINT_FOR_DOC = \"t5-small\"\nT5_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"t5-small\",\n \"t5-base\",\n \"t5-large\",\n \"t5-3b\",\n \"t5-11b\",\n # See all T5 models at https://huggingface.co/models?filter=t5\n]\nPARALLELIZE_DOCSTRING = r\"\"\"\n This is an experimental feature and is a subject to change at a moment's notice.\n\n Uses a device map to distribute attention modules of the model across several devices. If no device map is given,\n it will evenly distribute blocks across all devices.\n\n Args:\n device_map (`Dict[int, list]`, optional, defaults to None):\n A dictionary that maps attention modules to devices. 
Note that the embedding module and LMHead are always\n automatically mapped to the first device (for esoteric reasons). That means that the first device should\n have fewer attention modules mapped to it than other devices. For reference, the t5 models have the\n following number of attention modules:\n\n - t5-small: 6\n - t5-base: 12\n - t5-large: 24\n - t5-3b: 24\n - t5-11b: 24\n\n Example:\n\n ```python\n # Here is an example of a device map on a machine with 4 GPUs using t5-3b, which has a total of 24 attention modules:\n model = T5ForConditionalGeneration.from_pretrained(\"t5-3b\")\n device_map = {\n 0: [0, 1, 2],\n 1: [3, 4, 5, 6, 7, 8, 9],\n 2: [10, 11, 12, 13, 14, 15, 16],\n 3: [17, 18, 19, 20, 21, 22, 23],\n }\n model.parallelize(device_map)\n ```\n\"\"\"\nDEPARALLELIZE_DOCSTRING = r\"\"\"\n Moves the model to cpu from a model parallel state.\n\n Example:\n\n ```python\n # On a 4 GPU machine with t5-3b:\n model = T5ForConditionalGeneration.from_pretrained(\"t5-3b\")\n device_map = {\n 0: [0, 1, 2],\n 1: [3, 4, 5, 6, 7, 8, 9],\n 2: [10, 11, 12, 13, 14, 15, 16],\n 3: [17, 18, 19, 20, 21, 22, 23],\n }\n model.parallelize(device_map) # Splits the model across several devices\n model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()\n ```\n\"\"\"\nT5_START_DOCSTRING = r\"\"\"\n\n The T5 model was proposed in [Exploring the Limits of Transfer Learning with a Unified Text-to-Text\n Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan\n Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder decoder transformer pre-trained in a\n text-to-text denoising generative setting.\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`T5Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\nT5_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you\n should be able to pad the inputs on both the right and the left.\n\n Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for detail.\n\n [What are input IDs?](../glossary#input-ids)\n\n To know more on how to prepare `input_ids` for pretraining take a look a [T5 Training](./t5#training).\n attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`\n is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).\n\n To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5\n Training](./t5#training).\n decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0,\n 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,\n 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in\n `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):\n Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*)\n `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at\n the output of the last layer of the encoder. Used in the cross-attention of the decoder.\n past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded\n representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be\n input (see `past_key_values`). This is useful if you want more control over how to convert\n `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.\n\n If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value\n of `inputs_embeds`.\n\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\nT5_ENCODER_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you\n should be able to pad the inputs on both the right and the left.\n\n Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for detail.\n\n To know more on how to prepare `input_ids` for pretraining take a look a [T5 Training](./t5#training).\n attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n__HEAD_MASK_WARNING_MSG = \"\"\"\nThe input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently,\n`decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions.\nIf you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers,\nnum_heads)`.\n\"\"\"\ndef load_tf_weights_in_t5(model, config, tf_checkpoint_path):\n def __init__(self, hidden_size, eps=1e-6):\n def forward(self, hidden_states):\n def __init__(self, config: T5Config):\n def forward(self, hidden_states):\n def __init__(self, config: T5Config):\n def forward(self, hidden_states):\n def __init__(self, config: T5Config):\n def forward(self, hidden_states):\n def __init__(self, config: T5Config, has_relative_attention_bias=False):\n def prune_heads(self, heads):\n def _relative_position_bucket(\n relative_position, bidirectional=True, num_buckets=32, max_distance=128\n ):\n def compute_bias(self, query_length, key_length, device=None):\n def forward(\n self,\n hidden_states,\n mask=None,\n key_value_states=None,\n position_bias=None,\n past_key_value=None,\n layer_head_mask=None,\n query_length=None,\n use_cache=False,\n output_attentions=False,\n ):\n def shape(states):\n def unshape(states):\n def project(hidden_states, proj_layer, key_value_states, past_key_value):\n def __init__(self, config, has_relative_attention_bias=False):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n position_bias=None,\n layer_head_mask=None,\n past_key_value=None,\n use_cache=False,\n output_attentions=False,\n ):\n def __init__(self, config):\n def forward(\n self,\n hidden_states,\n key_value_states,\n attention_mask=None,\n position_bias=None,\n layer_head_mask=None,\n past_key_value=None,\n use_cache=False,\n query_length=None,\n output_attentions=False,\n ):\n def __init__(self, config, has_relative_attention_bias=False):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n position_bias=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n encoder_decoder_position_bias=None,\n layer_head_mask=None,\n cross_attn_layer_head_mask=None,\n past_key_value=None,\n use_cache=False,\n output_attentions=False,\n return_dict=True,\n ):\n def dummy_inputs(self):\n def _init_weights(self, module):\n def _set_gradient_checkpointing(self, module, value=False):\n def _shift_right(self, input_ids):\n def __init__(self, config, embed_tokens=None):\n def parallelize(self, device_map=None):\n def deparallelize(self):\n def get_input_embeddings(self):\n def set_input_embeddings(self, new_embeddings):\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n inputs_embeds=None,\n head_mask=None,\n cross_attn_head_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n def create_custom_forward(module):\n def custom_forward(*inputs):\n def __init__(self, config: T5Config):\n def parallelize(self, device_map=None):\n def deparallelize(self):\n def get_input_embeddings(self):\n def set_input_embeddings(self, new_embeddings):\n def get_encoder(self):\n def get_decoder(self):\n def _prune_heads(self, heads_to_prune):\n def forward(\n self,\n input_ids: 
Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.BoolTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n decoder_head_mask: Optional[torch.FloatTensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,\n inputs_embeds: Optional[torch.Tensor] = None,\n decoder_inputs_embeds: Optional[torch.Tensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:\n def __init__(self, config: T5Config):\n def parallelize(self, device_map=None):\n def deparallelize(self):\n def get_input_embeddings(self):\n def set_input_embeddings(self, new_embeddings):\n def set_output_embeddings(self, new_embeddings):\n def get_output_embeddings(self):\n def get_encoder(self):\n def get_decoder(self):\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.BoolTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n decoder_head_mask: Optional[torch.FloatTensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n decoder_inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n reduction: Optional[str] = \"mean\",\n ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:\n def prepare_inputs_for_generation(\n self,\n input_ids,\n past=None,\n attention_mask=None,\n head_mask=None,\n decoder_head_mask=None,\n cross_attn_head_mask=None,\n use_cache=None,\n encoder_outputs=None,\n **kwargs,\n ):\n def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):\n def _reorder_cache(self, past, beam_idx):\n def __init__(self, config: T5Config):\n def parallelize(self, device_map=None):\n def deparallelize(self):\n def get_input_embeddings(self):\n def set_input_embeddings(self, new_embeddings):\n def get_encoder(self):\n def _prune_heads(self, heads_to_prune):\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]:\nclass T5LayerNorm(nn.Module):\nclass T5DenseActDense(nn.Module):\nclass T5DenseGatedActDense(nn.Module):\nclass T5LayerFF(nn.Module):\nclass T5Attention(nn.Module):\nclass T5LayerSelfAttention(nn.Module):\nclass T5LayerCrossAttention(nn.Module):\nclass T5Block(nn.Module):\nclass T5PreTrainedModel(PreTrainedModel):\nclass T5Stack(T5PreTrainedModel):\nclass T5Model(T5PreTrainedModel):\nclass 
T5ForConditionalGeneration(T5PreTrainedModel):\nclass T5EncoderModel(T5PreTrainedModel):" } ]
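The Registry snippet above exposes get_model_class, and the file in this row registers its model class under the name "blip2_t5_instruct" (see the decorator in the code fields below). A lookup sketch, assuming the repository root is on the Python path and its dependencies are installed:

from models.instruct_blip.common.registry import registry
# Importing the module runs the @registry.register_model("blip2_t5_instruct") decorator,
# which makes the class visible to the registry.
from models.instruct_blip.models.blip2_models import blip2_t5_instruct  # noqa: F401

Blip2T5Instruct = registry.get_model_class("blip2_t5_instruct")
print(Blip2T5Instruct.PRETRAINED_MODEL_CONFIG_DICT["flant5xl"])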
import logging import string import random import copy import torch import torch.nn as nn import spacy from torch.cuda.amp import autocast as autocast from transformers import T5TokenizerFast from ...common.registry import registry from .blip2 import Blip2Base, disabled_train from .modeling_t5 import T5Config, T5ForConditionalGeneration from transformers.modeling_outputs import BaseModelOutput
7,305
""" Copyright (c) 2023, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ @registry.register_model("blip2_t5_instruct") class Blip2T5Instruct(Blip2Base): """ BLIP2 T5 model. Supported model types: - flant5xl - flant5xxl Usage: >>> from lavis.models import load_model >>> model = load_model("blip2_t5_instruct", "flant5xl") """ PRETRAINED_MODEL_CONFIG_DICT = { "flant5xl": "configs/models/blip2/blip2_instruct_flant5xl.yaml", "flant5xxl": "configs/models/blip2/blip2_instruct_flant5xxl.yaml", } def __init__( self, vit_model="eva_clip_g", img_size=224, drop_path_rate=0, use_grad_checkpoint=False, vit_precision="fp16", freeze_vit=True, num_query_token=32, t5_model="google/flan-t5-xl", prompt="", max_txt_len=128, max_output_txt_len=256, apply_lemmatizer=False, num_few_shot_examples=0, few_shot_prob=0, qformer_text_input=True, ): """ apply_lemmatizer: when set to True, postprocess predict_answers() result with lemmas. """ super().__init__() self.tokenizer = self.init_tokenizer(truncation_side="left") self.visual_encoder, self.ln_vision = self.init_vision_encoder( vit_model, img_size, drop_path_rate, use_grad_checkpoint, vit_precision ) if freeze_vit: for name, param in self.visual_encoder.named_parameters(): param.requires_grad = False self.visual_encoder = self.visual_encoder.eval()
""" Copyright (c) 2023, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ @registry.register_model("blip2_t5_instruct") class Blip2T5Instruct(Blip2Base): """ BLIP2 T5 model. Supported model types: - flant5xl - flant5xxl Usage: >>> from lavis.models import load_model >>> model = load_model("blip2_t5_instruct", "flant5xl") """ PRETRAINED_MODEL_CONFIG_DICT = { "flant5xl": "configs/models/blip2/blip2_instruct_flant5xl.yaml", "flant5xxl": "configs/models/blip2/blip2_instruct_flant5xxl.yaml", } def __init__( self, vit_model="eva_clip_g", img_size=224, drop_path_rate=0, use_grad_checkpoint=False, vit_precision="fp16", freeze_vit=True, num_query_token=32, t5_model="google/flan-t5-xl", prompt="", max_txt_len=128, max_output_txt_len=256, apply_lemmatizer=False, num_few_shot_examples=0, few_shot_prob=0, qformer_text_input=True, ): """ apply_lemmatizer: when set to True, postprocess predict_answers() result with lemmas. """ super().__init__() self.tokenizer = self.init_tokenizer(truncation_side="left") self.visual_encoder, self.ln_vision = self.init_vision_encoder( vit_model, img_size, drop_path_rate, use_grad_checkpoint, vit_precision ) if freeze_vit: for name, param in self.visual_encoder.named_parameters(): param.requires_grad = False self.visual_encoder = self.visual_encoder.eval()
self.visual_encoder.train = disabled_train
2
2023-12-05 14:17:17+00:00
12k
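This row's next_line target, self.visual_encoder.train = disabled_train, completes the freezing idiom from the quoted blip2.py: parameters lose requires_grad, the encoder is switched to eval mode, and Module.train is shadowed so a later model.train() cannot flip the frozen ViT back into training mode. A toy illustration of the same pattern (the nn.Linear is a purely hypothetical stand-in for the vision encoder):

import torch.nn as nn

def disabled_train(self, mode=True):
    """No-op replacement for Module.train, mirroring the snippet above."""
    return self

encoder = nn.Linear(8, 8)        # hypothetical stand-in for the frozen vision encoder
for p in encoder.parameters():
    p.requires_grad = False
encoder = encoder.eval()
encoder.train = disabled_train   # instance attribute now shadows nn.Module.train

print(encoder.training)          # False, and it stays False even when a parent module
                                 # propagates .train(mode) down to this child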
modelscope/llmuses
llmuses/run_ms.py
[ { "identifier": "DATASET_ID", "path": "llmuses/benchmarks/ceval/ceval_adapter.py", "snippet": "DATASET_ID = 'modelscope/ceval-exam'" }, { "identifier": "DATASET_ID", "path": "llmuses/benchmarks/mmlu/mmlu_adapter.py", "snippet": "DATASET_ID = 'modelscope/mmlu'" }, { "identifier": "DATASET_ID", "path": "llmuses/benchmarks/hellaswag/hellaswag_adapter.py", "snippet": "DATASET_ID = 'modelscope/hellaswag'" }, { "identifier": "DATASET_ID", "path": "llmuses/benchmarks/arc/arc_adapter.py", "snippet": "DATASET_ID = 'modelscope/ai2_arc'" }, { "identifier": "DATASET_ID", "path": "llmuses/benchmarks/truthful_qa/truthful_qa_adapter.py", "snippet": "DATASET_ID = 'modelscope/truthful_qa'" }, { "identifier": "DEFAULT_ROOT_CACHE_DIR", "path": "llmuses/constants.py", "snippet": "DEFAULT_ROOT_CACHE_DIR = '~/.cache/llmuses'" }, { "identifier": "Evaluator", "path": "llmuses/evaluator/evaluator.py", "snippet": "class Evaluator(object):\n\n \"\"\"\n The evaluator for model on datasets.\n \"\"\"\n\n def __init__(self,\n dataset_name_or_path: str,\n data_adapter: DataAdapter,\n subset_list: Optional[list] = None,\n model_adapter: Optional[BaseModelAdapter] = None,\n use_cache: bool = True,\n mem_cache_method: str = 'ttl',\n root_cache_dir: Optional[str] = DEFAULT_ROOT_CACHE_DIR,\n outputs_dir: Optional[str] = '',\n is_custom_outputs_dir: bool = False,\n datasets_dir: Optional[str] = DEFAULT_ROOT_CACHE_DIR,\n stage: Optional[str] = 'all',\n **kwargs):\n\n self.dataset_name_or_path = dataset_name_or_path\n self.root_cache_dir = os.path.expanduser(root_cache_dir)\n self.datasets_dir = os.path.expanduser(datasets_dir)\n self.kwargs = kwargs\n self.data_adapter = data_adapter\n self.model_adapter = model_adapter\n\n self.model_cfg = self.model_adapter.model_cfg\n self.model_id = self.model_cfg['model_id']\n self.model_revision = self.model_cfg.get('revision', None)\n self.model_revision_str = self.model_revision if self.model_revision is not None else 'none'\n\n # Get default outputs_dir\n if not is_custom_outputs_dir:\n outputs_dir = make_outputs_dir(work_dir=outputs_dir,\n model_id=self.model_id,\n model_revision=self.model_revision_str)\n\n self.outputs_dir = os.path.expanduser(outputs_dir)\n\n # Deal with the output paths\n self.outputs_structure = make_outputs_structure(self.outputs_dir)\n\n # Load dataset\n self.dataset = self.data_adapter.load(dataset_name_or_path=dataset_name_or_path,\n subset_list=subset_list,\n work_dir=self.datasets_dir,\n **kwargs)\n\n # Get prompts from dataset\n self.prompts = self.data_adapter.gen_prompts(data_dict=self.dataset)\n del self.dataset\n\n # Init memory cache\n # TODO: refactor mem cache manager\n mem_cache_file_name = self.dataset_name_or_path.replace('/', '_') + \\\n '_' + self.model_id.replace('/', '_') + \\\n '_' + self.model_revision_str + \\\n '_cache.pkl'\n self.mem_cache_path = os.path.join(self.root_cache_dir, 'mem_cache', mem_cache_file_name)\n self.use_cache = use_cache\n self.mem_cache_method = mem_cache_method\n self.mem_cache = None\n if self.use_cache:\n self.mem_cache = init_mem_cache(method=self.mem_cache_method, cache_file_path=self.mem_cache_path)\n logger.info(f'** Using memory cache with size: {len(self.mem_cache)}')\n\n def _pred_answer(self,\n input_d: dict,\n infer_cfg: dict,\n subset_name: str,\n answer_id: str = None) -> dict:\n\n # Get answer from memory cache\n if self.mem_cache is not None:\n if answer_id in self.mem_cache:\n logger.info(f'** Reusing answer `{answer_id}` in memory cache.')\n return self.mem_cache[answer_id]\n\n ans: dict = 
self.model_adapter.predict(inputs=input_d, infer_cfg=infer_cfg)\n ans[AnswerKeys.ANSWER_ID] = answer_id\n ans[AnswerKeys.SUBSET_NAME] = subset_name\n\n if self.mem_cache is not None:\n self.mem_cache[answer_id] = ans\n\n return ans\n\n def get_answers(self,\n subset_name: str,\n prompts_list: List[dict],\n infer_cfg: dict = None,\n debug: bool = False,\n **kwargs) -> list:\n \"\"\"\n Get answers from model inference.\n It is required to rewrite this method to support your own evaluator.\n\n Args:\n subset_name: subset name for benchmark.\n prompts_list: prompts list.\n infer_cfg: model inference config.\n Attributes:\n do_sample: bool, whether to use sampling.\n top_k: int, the number of highest probability vocabulary tokens to keep for top-k-filtering.\n top_p: float, if set to float < 1, only the most probable tokens with probabilities to add.\n temperature: float, the value used to module the next token probabilities.\n num_beams: int, number of beams for beam search. 1 means no beam search.\n max_length: int, the max length of the sequence to be generated.\n max_new_tokens: int, the max number of new tokens to be generated.\n repetition_penalty: float, the parameter for repetition penalty. 1.0 means no penalty.\n debug: whether to run in debug mode.\n **kwargs: kwargs.\n\n Returns: The list of answers.\n \"\"\"\n assert self.data_adapter is not None, 'data_adapter must be provided when calling func get_answers() !'\n assert self.model_adapter is not None, 'model must be provided when calling func get_answers() !'\n\n answers_list = []\n for input_prompt in tqdm(prompts_list, total=len(prompts_list), desc=f'Predicting({subset_name}): '):\n\n # Gen answer_id (concat: model_cfg + input_prompt + infer_cfg)\n model_cfg_str = json.dumps(\n OrderedDict(sorted(dict_torch_dtype_to_str(self.model_adapter.model_cfg).items())),\n ensure_ascii=False)\n input_prompt_str = json.dumps(OrderedDict(sorted(dict_torch_dtype_to_str(input_prompt).items())),\n ensure_ascii=False)\n infer_cfg_str = json.dumps(OrderedDict(sorted(dict_torch_dtype_to_str(infer_cfg).items())),\n ensure_ascii=False)\n answer_id = 'answer-' + gen_hash(model_cfg_str + input_prompt_str + infer_cfg_str)\n\n # Get answers\n answer_d: dict = self._pred_answer(input_d=input_prompt,\n infer_cfg=infer_cfg,\n subset_name=subset_name,\n answer_id=answer_id)\n\n answer_d[AnswerKeys.MODEL_SPEC] = self.model_adapter.model_cfg\n answer_d[AnswerKeys.RAW_INPUT] = input_prompt[AnswerKeys.RAW_INPUT]\n answer_d[AnswerKeys.ORIGIN_PROMPT] = input_prompt\n\n if debug:\n logger.debug(f'**input_prompt: {json.dumps(input_prompt, ensure_ascii=False)} \\n')\n logger.debug(f'**predicted ans: {json.dumps(answer_d, ensure_ascii=False)} \\n')\n\n answers_list.append(answer_d)\n\n # Dump answers\n pred_dir: str = self.outputs_structure.get(OutputsStructure.PREDICTIONS_DIR)\n pred_file_name: str = self.dataset_name_or_path.replace('/', '_') + '_' + subset_name + '.jsonl'\n os.makedirs(pred_dir, exist_ok=True)\n dump_jsonl_data(answers_list, os.path.join(pred_dir, pred_file_name))\n\n return answers_list\n\n def _get_review(self,\n answer_d: dict,\n review_id: str = None,\n reviewer_spec: dict = None) -> dict:\n\n # Get review from memory cache\n if self.mem_cache is not None:\n if review_id in self.mem_cache:\n logger.info(f'** Reusing review `{review_id}` in memory cache.')\n return self.mem_cache[review_id]\n\n if reviewer_spec is None:\n reviewer_spec = {}\n\n review_res = deepcopy(answer_d)\n choices = review_res[AnswerKeys.CHOICES]\n if len(choices) == 0:\n 
review_res[ReviewKeys.REVIEWED] = False\n review_res[ReviewKeys.REVIEW_ID] = None\n review_res[ReviewKeys.REVIEWER_SPEC] = reviewer_spec\n review_res[ReviewKeys.REVIEW_TIME] = time.time()\n return review_res\n\n rev_choices = []\n for choice in choices:\n raw_input_d: dict = review_res[AnswerKeys.RAW_INPUT]\n answer_content = choice[ReviewKeys.MESSAGE][ReviewKeys.CONTENT]\n answer_content = self.data_adapter.parse_pred_result(answer_content, raw_input_d)\n gold_content = self.data_adapter.get_gold_answer(raw_input_d)\n\n review_result = self.data_adapter.match(gold_content, answer_content)\n choice[ReviewKeys.REVIEW] = {ReviewKeys.GOLD: gold_content,\n ReviewKeys.PRED: answer_content,\n ReviewKeys.RESULT: review_result}\n\n rev_choices.append(choice)\n\n review_res[AnswerKeys.CHOICES] = rev_choices\n review_res[ReviewKeys.REVIEWED] = True\n review_res[ReviewKeys.REVIEW_ID] = review_id\n review_res[ReviewKeys.REVIEWER_SPEC] = reviewer_spec\n review_res[ReviewKeys.REVIEW_TIME] = time.time()\n\n if self.mem_cache is not None:\n self.mem_cache[review_id] = review_res\n\n return review_res\n\n def get_reviews(self, subset_name: str, answers_list: List[dict], debug: bool = False, **kwargs) -> list:\n \"\"\"\n Get reviews from answers.\n It is required to rewrite this method to support your own evaluator.\n\n Args:\n subset_name: subset name of benchmark\n answers_list: inference results list.\n debug: whether to run in debug mode.\n **kwargs: kwargs.\n\n Returns: reviews list.\n \"\"\"\n reviews_list = []\n for answer_d in tqdm(answers_list, total=len(answers_list), desc=f'Reviewing({subset_name}): '):\n\n # Gen review_id (concat: answer_id + reviewer_spec)\n answer_id = answer_d[AnswerKeys.ANSWER_ID]\n\n reviewer_spec: dict = {'metric': [metric_d['name'] for metric_d in self.data_adapter.metric_list],\n 'reviewer': ['Evaluator'],\n 'revision': ['default']}\n reviewer_spec_str = json.dumps(OrderedDict(sorted(dict_torch_dtype_to_str(reviewer_spec).items())),\n ensure_ascii=False)\n review_id = 'review-' + gen_hash(answer_id + reviewer_spec_str)\n\n # Get review\n review_d = self._get_review(answer_d=answer_d, review_id=review_id, reviewer_spec=reviewer_spec)\n\n if debug:\n logger.debug(review_d)\n\n reviews_list.append(review_d)\n\n # Dump reviews\n review_dir: str = self.outputs_structure.get(OutputsStructure.REVIEWS_DIR)\n review_file_name: str = self.dataset_name_or_path.replace('/', '_') + '_' + subset_name + '.jsonl'\n os.makedirs(review_dir, exist_ok=True)\n dump_jsonl_data(reviews_list, os.path.join(review_dir, review_file_name))\n\n return reviews_list\n\n def compute_metrics(self, reviews_list: List[dict]) -> Any:\n \"\"\"\n To compute metrics from reviews_list for each subset.\n It is required to rewrite this method to support your own evaluator.\n\n Args:\n reviews_list: reviews list.\n\n Returns:\n The metric result. 
Depends on the metric function in data_adapter.\n \"\"\"\n\n review_res_list = []\n for review_d in reviews_list:\n if not review_d[ReviewKeys.REVIEWED]:\n logger.warning(f'** Review not finished for answer_id: {review_d[AnswerKeys.ANSWER_ID]}')\n continue\n\n review_res = review_d[AnswerKeys.CHOICES][0][ReviewKeys.REVIEW][ReviewKeys.RESULT]\n review_res_list.append(review_res)\n\n metric_score: Union[float, dict] = self.data_adapter.compute_metric(review_res_list=review_res_list)\n\n return metric_score\n\n def dump_report(self, report_map: dict, use_table: bool = True):\n \"\"\"\n Get report for total reviews of specific dataset.\n It is required to rewrite this method to support your own evaluator.\n\n Args:\n report_map: report dict. Generated by func self.data_adapter.gen_report().\n use_table: whether to generate table for reports. Default to True.\n\n Returns: None\n \"\"\"\n\n # Dump report\n report_dir: str = self.outputs_structure[OutputsStructure.REPORTS_DIR]\n report_file_name: str = self.dataset_name_or_path.replace('/', '_') + '.json'\n os.makedirs(report_dir, exist_ok=True)\n report_path: str = os.path.join(report_dir, report_file_name)\n with open(report_path, 'w') as f:\n f.write(json.dumps(report_map, ensure_ascii=False, indent=4))\n # logger.info(f'** Dump report to {report_path} \\n')\n logger.info(f'** Dump report: {report_file_name} \\n')\n\n if use_table:\n try:\n # Make table\n report_table: str = gen_table([report_dir])\n logger.info(f'** Report table: \\n {report_table} \\n')\n except:\n logger.error('Failed to generate report table.')\n\n def save_cache(self):\n if self.mem_cache is not None:\n logger.info(f'** Saving memory cache with size: {len(self.mem_cache)}')\n Cache.save(cache=self.mem_cache, path=self.mem_cache_path)\n\n def clear_cache(self):\n \"\"\"\n Clear memory cache.\n\n Returns: None\n \"\"\"\n if self.mem_cache is not None:\n cache_len = len(self.mem_cache)\n self.mem_cache.clear()\n logger.info(f'** Memory cache cleared, length changed: {cache_len} -> {len(self.mem_cache)}')\n\n def eval(self,\n infer_cfg: dict = None,\n debug: bool = False,\n **kwargs):\n \"\"\"\n Evaluate the model on the specific benchmark. Streaming & parallel mode is supported.\n It is required to rewrite this method to support your own evaluator.\n\n The evaluation process is as follows:\n 1. Get the input samples from the dataset (benchmarks on the ModelScope or HuggingFace).\n 2. Get the input prompts from dataset with specific data adapter.\n 3. Get answers with model inference.\n 4. Get reviews with metric function (or reviewers).\n 5. Generate report from review results.\n\n Args:\n infer_cfg: The config for model inference.\n debug: Whether to run in debug mode. 
Default: False.\n\n Returns:\n None.\n \"\"\"\n\n logger.info(f'**** Start evaluating on dataset {self.dataset_name_or_path} ****')\n\n reviews_map_all = {} # {subset_name: (score, num)}\n for subset_name, prompts_list in self.prompts.items():\n limit = infer_cfg.get('limit', len(prompts_list))\n prompts_list = prompts_list[:limit]\n\n answers_list: list = self.get_answers(subset_name=subset_name,\n prompts_list=prompts_list,\n infer_cfg=infer_cfg,\n debug=debug,\n **kwargs)\n\n reviews_list: list = self.get_reviews(subset_name=subset_name,\n answers_list=answers_list,\n debug=debug,\n **kwargs)\n\n metric_res = self.compute_metrics(reviews_list=reviews_list)\n reviews_map_all[subset_name] = (metric_res, len(reviews_list))\n\n # Generate report\n report_map: dict = self.data_adapter.gen_report(subset_score_map=reviews_map_all)\n self.dump_report(report_map=report_map)\n\n self.save_cache()\n self.clear_cache()\n\n logger.info(f'\\n**** Evaluation finished on {self.dataset_name_or_path} ****\\n')" }, { "identifier": "MultiChoiceModelAdapter", "path": "llmuses/models/model_adapter.py", "snippet": "class MultiChoiceModelAdapter(BaseModelAdapter):\n \"\"\" The multi-choice model adapter. \"\"\"\n\n _DEFAULT_MAX_LENGTH = 2048\n\n def __init__(self,\n model_id: str,\n device_map: str = 'auto',\n torch_dtype: dtype = torch.bfloat16,\n model_revision: str = None,\n max_length: int = None,\n **kwargs):\n \"\"\"\n Args:\n model_id: The model id on ModelScope, or local model_dir. TODO: torch.nn.module to be supported.\n device_map: The device map for model inference.\n torch_dtype: The torch dtype for model inference. Default: torch.bfloat16.\n model_revision: The model revision on ModelScope. Default: None.\n max_length: The max length of input sequence. Default: None.\n **kwargs: Other args.\n \"\"\"\n\n self.model_id: str = model_id\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n torch_dtype = torch_dtype if torch_dtype is not None else 'auto'\n\n model_cfg: dict = dict()\n model_cfg['model_id'] = model_id\n model_cfg['device_map'] = device_map\n model_cfg['torch_dtype'] = str(torch_dtype)\n\n from modelscope.utils.hf_util import AutoModelForCausalLM, AutoTokenizer\n\n tokenizer = AutoTokenizer.from_pretrained(self.model_id,\n revision=model_revision,\n trust_remote_code=True,)\n\n model = AutoModelForCausalLM.from_pretrained(self.model_id,\n revision=model_revision,\n device_map=device_map,\n trust_remote_code=True,\n torch_dtype=torch_dtype,)\n\n # model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True)\n\n super().__init__(model=model, tokenizer=tokenizer, model_cfg=model_cfg)\n\n self._max_length = max_length\n\n @property\n def max_length(self):\n if self._max_length:\n return self._max_length\n seqlen_config_attrs = ('n_positions', 'max_position_embeddings', 'n_ctx')\n for attr in seqlen_config_attrs:\n if hasattr(self.model.config, attr):\n return getattr(self.model.config, attr)\n if hasattr(self.tokenizer, 'model_max_length'):\n if self.tokenizer.model_max_length == 1000000000000000019884624838656:\n return self._DEFAULT_MAX_LENGTH\n return self.tokenizer.model_max_length\n return self._DEFAULT_MAX_LENGTH\n\n @torch.no_grad()\n def predict(self, inputs: dict, infer_cfg: dict = None) -> dict:\n \"\"\"\n Multi-choice model prediction func.\n\n Args:\n inputs (dict): The inputs for a doc. 
Format:\n {'data': [full_prompt], 'multi_choices': ['A', 'B', 'C', 'D']}\n\n infer_cfg (dict): inference configuration.\n\n Returns:\n res (dict): The model prediction results. Format:\n {\n 'choices': [\n {\n 'index': 0,\n 'message': {\n 'content': [-14.9609, -13.6015, ...], # loglikelihood values for inputs context-continuation pairs.\n 'role': 'assistant'\n }\n }\n ],\n 'created': 1677664795,\n # For models on the ModelScope or HuggingFace, concat model_id and revision with \"-\".\n 'model': 'gpt-3.5-turbo-0613',\n 'object': 'chat.completion',\n 'usage': {\n 'completion_tokens': 17,\n 'prompt_tokens': 57,\n 'total_tokens': 74\n }\n }\n \"\"\"\n\n # TODO: unused\n if infer_cfg is None:\n infer_cfg = {'do_sample': True, 'max_length': 1024}\n\n input_data = inputs['data']\n multi_choices = inputs['multi_choices']\n\n output, input_info = self._get_logits(self.tokenizer, self.model, input_data)\n assert output.shape[0] == 1\n logits = output.flatten()\n\n choice_logits = [logits[self.tokenizer(ch)['input_ids'][-1:]] for ch in multi_choices]\n softval = torch.nn.functional.softmax(torch.tensor(choice_logits).float(), dim=0)\n\n if softval.dtype in {torch.bfloat16, torch.float16}:\n softval = softval.to(dtype=torch.float32)\n probs = softval.detach().cpu().numpy()\n pred: str = multi_choices[int(np.argmax(probs))] # Format: A or B or C or D\n\n res_d = {\n 'choices': [\n {\n 'index': 0,\n 'message': {\n 'content': pred,\n 'role': 'assistant'\n }\n }\n ],\n 'created': time.time(),\n 'model': self.model_id,\n 'object': 'chat.completion',\n 'usage': {}\n }\n\n return res_d\n\n @staticmethod\n def _get_logits(tokenizer, model, inputs: List[str]):\n input_ids = tokenizer(inputs, padding=False)['input_ids']\n input_ids = torch.tensor(input_ids, device=model.device)\n tokens = {'input_ids': input_ids}\n\n outputs = model(input_ids)['logits']\n logits = outputs[:, -1, :]\n log_probs = torch.nn.functional.softmax(logits, dim=-1)\n return log_probs, {'tokens': tokens}" }, { "identifier": "ContinuationLogitsModelAdapter", "path": "llmuses/models/model_adapter.py", "snippet": "class ContinuationLogitsModelAdapter(MultiChoiceModelAdapter):\n\n def __init__(self,\n model_id: str,\n device_map: str = 'auto',\n torch_dtype: dtype = torch.bfloat16,\n model_revision: str = None,\n **kwargs):\n \"\"\"\n Continuation-logits model adapter.\n\n Args:\n model_id: The model id on ModelScope, or local model_dir.\n device_map: The device map for model inference.\n torch_dtype: The torch dtype for model inference. Default: torch.bfloat16.\n model_revision: The model revision on ModelScope. Default: None.\n **kwargs: Other args.\n \"\"\"\n\n super().__init__(model_id=model_id,\n device_map=device_map,\n torch_dtype=torch_dtype,\n model_revision=model_revision,\n **kwargs)\n\n @torch.no_grad()\n def predict(self, inputs: dict, infer_cfg: dict = None) -> dict:\n \"\"\"\n Multi-choice model prediction func.\n Args:\n inputs (dict): The inputs for a doc. Format:\n {'data': [(context, continuation), ...]}\n infer_cfg (dict): inference configuration.\n Returns:\n res (dict): The model prediction results. 
Format:\n {\n 'choices': [\n {\n 'index': 0,\n 'message': {\n 'content': [-14.9609, -13.6015, ...], # loglikelihood values for inputs context-continuation pairs.\n 'role': 'assistant'\n }\n }\n ],\n 'created': 1677664795,\n # For models on the ModelScope or HuggingFace, concat model_id and revision with \"-\".\n 'model': 'gpt-3.5-turbo-0613',\n 'object': 'chat.completion',\n 'usage': {\n 'completion_tokens': 17,\n 'prompt_tokens': 57,\n 'total_tokens': 74\n }\n }\n \"\"\"\n if infer_cfg is None:\n infer_cfg = {'do_sample': True, 'max_length': 2048}\n\n pred_list: list = self.loglikelihood(inputs=inputs['data'], infer_cfg=infer_cfg)\n\n res_d = {\n 'choices': [\n {\n 'index': 0,\n 'message': {\n 'content': pred_list,\n 'role': 'assistant'\n }\n }\n ],\n 'created': time.time(),\n 'model': self.model_id,\n 'object': 'chat.completion',\n 'usage': {}\n }\n return res_d\n\n def loglikelihood(self, inputs: list, infer_cfg: dict = None) -> list:\n # To predict one doc\n doc_ele_pred = []\n for ctx, continuation in inputs:\n\n # ctx_enc shape: [context_tok_len] cont_enc shape: [continuation_tok_len]\n ctx_enc, cont_enc = self._encode_pair(ctx, continuation)\n\n inputs_tokens = torch.tensor(\n (ctx_enc.tolist() + cont_enc.tolist())[-(self.max_length + 1):][:-1],\n dtype=torch.long,\n device=self.model.device).unsqueeze(0)\n\n logits = self.model(inputs_tokens)[0]\n logits = torch.nn.functional.log_softmax(logits.float(), dim=-1)\n\n logits = logits[:, -len(cont_enc):, :]\n cont_enc = cont_enc.unsqueeze(0).unsqueeze(-1)\n logits = torch.gather(logits.cpu(), 2, cont_enc.cpu()).squeeze(-1)\n\n choice_score = float(logits.sum())\n doc_ele_pred.append(choice_score)\n\n # e.g. [-2.3, -9.2, -12.9, 1.1], length=len(choices)\n return doc_ele_pred\n\n def _encode_pair(self, context, continuation):\n n_spaces = len(context) - len(context.rstrip())\n if n_spaces > 0:\n continuation = context[-n_spaces:] + continuation\n context = context[:-n_spaces]\n\n whole_enc = self.tokenizer(context + continuation, padding=False)['input_ids']\n whole_enc = torch.tensor(whole_enc, device=self.device)\n\n context_enc = self.tokenizer(context, padding=False)['input_ids']\n context_enc = torch.tensor(context_enc, device=self.device)\n\n context_enc_len = len(context_enc)\n continuation_enc = whole_enc[context_enc_len:]\n\n return context_enc, continuation_enc" }, { "identifier": "get_logger", "path": "llmuses/utils/logger.py", "snippet": "def get_logger(log_file: Optional[str] = None,\n log_level: int = logging.INFO,\n file_mode: str = 'w'):\n \"\"\" Get logging logger\n\n Args:\n log_file: Log filename, if specified, file handler will be added to\n logger\n log_level: Logging level.\n file_mode: Specifies the mode to open the file, if filename is\n specified (if filemode is unspecified, it defaults to 'w').\n \"\"\"\n\n logger_name = __name__.split('.')[0]\n logger = logging.getLogger(logger_name)\n\n if logger_name in init_loggers:\n add_file_handler_if_needed(logger, log_file, file_mode, log_level)\n return logger\n\n for handler in logger.root.handlers:\n if type(handler) is logging.StreamHandler:\n handler.setLevel(logging.ERROR)\n\n stream_handler = logging.StreamHandler()\n handlers = [stream_handler]\n\n if log_file is not None:\n file_handler = logging.FileHandler(log_file, file_mode)\n handlers.append(file_handler)\n\n for handler in handlers:\n handler.setFormatter(formatter)\n handler.setLevel(log_level)\n logger.addHandler(handler)\n\n logger.setLevel(log_level)\n\n init_loggers[logger_name] = True\n\n return 
logger" } ]
import argparse import torch from llmuses.benchmarks.ceval import DATASET_ID as CEVAL_EXAM from llmuses.benchmarks.mmlu import DATASET_ID as MMLU from llmuses.benchmarks.hellaswag import DATASET_ID as HELLA_SWAG from llmuses.benchmarks.arc import DATASET_ID as ARC from llmuses.benchmarks.truthful_qa import DATASET_ID as TRUTHFUL_QA from llmuses.constants import DEFAULT_ROOT_CACHE_DIR from llmuses.evaluator import Evaluator from llmuses.models.model_adapter import MultiChoiceModelAdapter, ContinuationLogitsModelAdapter from llmuses.utils.logger import get_logger from llmuses.models.dummy_chat_model import DummyChatModel from llmuses.benchmarks.ceval import CEVALAdapter from llmuses.benchmarks.mmlu import MMLUAdapter from llmuses.benchmarks.arc import ARCAdapter from llmuses.benchmarks.hellaswag import HellaSwagAdapter from llmuses.benchmarks.truthful_qa import TruthfulQaAdapter
7,598
# Copyright (c) Alibaba, Inc. and its affiliates. # flake8: noqa logger = get_logger() # TODO: add more precision MODEL_PRECISION_MAP = {'fp16': torch.float16, 'fp32': torch.float32, 'bf16': torch.bfloat16} """ Run evaluation process for ModelScope Leaderboard. """ def parse_args(): parser = argparse.ArgumentParser(description='Run evaluation on a model') parser.add_argument('--model', help='Model id from modelscope or huggingface.', required=True) parser.add_argument('--revision', help='Model revision.', required=False, default=None) parser.add_argument('--precision', help='Model precision.', default='bf16') parser.add_argument('--work-dir', help='root work cache dir.', default=None) parser.add_argument('--outputs-dir', help='Outputs dir.', default='outputs') parser.add_argument('--datasets-dir', help='Datasets dir.', default=DEFAULT_ROOT_CACHE_DIR) parser.add_argument('--device-map', help='device map.', default='auto') parser.add_argument('--max-eval-size', type=int, help='Max evaluation samples num for each subset', default=None) parser.add_argument('--dataset-id', help='Dataset id on modelscope', required=False, default=None) parser.add_argument('--debug', help='Debug mode, will print information for debugging.', action='store_true', default=False) parser.add_argument('--dry-run', help='Dry run in single processing mode.', action='store_true', default=False) parser.add_argument('--mem-cache', help='To use memory cache or not.', action='store_true', default=False) args = parser.parse_args() return args def main(): args = parse_args() logger.info(args) # Customize your target datasets here all_benchmarks = [CEVAL_EXAM, MMLU, ARC, HELLA_SWAG, TRUTHFUL_QA] dataset_id = args.dataset_id if dataset_id is None: datasets = all_benchmarks elif dataset_id in all_benchmarks: datasets = [dataset_id] else: raise ValueError(f'Unknown dataset: {dataset_id}, Supported datasets: {all_benchmarks}') # Get model instance if args.dry_run: model_adapter = DummyChatModel(model_cfg=dict()) # TODO model_id: str = 'dummy' model_revision: str = 'v1.0.0' model_precision = MODEL_PRECISION_MAP.get(args.precision, torch.bfloat16) else: model_id: str = args.model model_revision: str = args.revision model_precision = MODEL_PRECISION_MAP.get(args.precision, torch.bfloat16) model_adapter = MultiChoiceModelAdapter(model_id=model_id, device_map=args.device_map, torch_dtype=model_precision, model_revision=model_revision,) # Evaluate on each dataset for dataset_name in datasets: if dataset_name == CEVAL_EXAM: data_adapter = CEVALAdapter() elif dataset_name == MMLU: data_adapter = MMLUAdapter() elif dataset_name == ARC: data_adapter = ARCAdapter() elif dataset_name == HELLA_SWAG: # Note: HellaSwag should run few-shot eval data_adapter = HellaSwagAdapter() elif dataset_name == TRUTHFUL_QA: data_adapter = TruthfulQaAdapter() # TODO: add more datasets here else: raise ValueError(f'Unknown dataset: {dataset_name}') # TODO: add mapping if dataset_name in {TRUTHFUL_QA, HELLA_SWAG} and not args.dry_run:
# Copyright (c) Alibaba, Inc. and its affiliates. # flake8: noqa logger = get_logger() # TODO: add more precision MODEL_PRECISION_MAP = {'fp16': torch.float16, 'fp32': torch.float32, 'bf16': torch.bfloat16} """ Run evaluation process for ModelScope Leaderboard. """ def parse_args(): parser = argparse.ArgumentParser(description='Run evaluation on a model') parser.add_argument('--model', help='Model id from modelscope or huggingface.', required=True) parser.add_argument('--revision', help='Model revision.', required=False, default=None) parser.add_argument('--precision', help='Model precision.', default='bf16') parser.add_argument('--work-dir', help='root work cache dir.', default=None) parser.add_argument('--outputs-dir', help='Outputs dir.', default='outputs') parser.add_argument('--datasets-dir', help='Datasets dir.', default=DEFAULT_ROOT_CACHE_DIR) parser.add_argument('--device-map', help='device map.', default='auto') parser.add_argument('--max-eval-size', type=int, help='Max evaluation samples num for each subset', default=None) parser.add_argument('--dataset-id', help='Dataset id on modelscope', required=False, default=None) parser.add_argument('--debug', help='Debug mode, will print information for debugging.', action='store_true', default=False) parser.add_argument('--dry-run', help='Dry run in single processing mode.', action='store_true', default=False) parser.add_argument('--mem-cache', help='To use memory cache or not.', action='store_true', default=False) args = parser.parse_args() return args def main(): args = parse_args() logger.info(args) # Customize your target datasets here all_benchmarks = [CEVAL_EXAM, MMLU, ARC, HELLA_SWAG, TRUTHFUL_QA] dataset_id = args.dataset_id if dataset_id is None: datasets = all_benchmarks elif dataset_id in all_benchmarks: datasets = [dataset_id] else: raise ValueError(f'Unknown dataset: {dataset_id}, Supported datasets: {all_benchmarks}') # Get model instance if args.dry_run: model_adapter = DummyChatModel(model_cfg=dict()) # TODO model_id: str = 'dummy' model_revision: str = 'v1.0.0' model_precision = MODEL_PRECISION_MAP.get(args.precision, torch.bfloat16) else: model_id: str = args.model model_revision: str = args.revision model_precision = MODEL_PRECISION_MAP.get(args.precision, torch.bfloat16) model_adapter = MultiChoiceModelAdapter(model_id=model_id, device_map=args.device_map, torch_dtype=model_precision, model_revision=model_revision,) # Evaluate on each dataset for dataset_name in datasets: if dataset_name == CEVAL_EXAM: data_adapter = CEVALAdapter() elif dataset_name == MMLU: data_adapter = MMLUAdapter() elif dataset_name == ARC: data_adapter = ARCAdapter() elif dataset_name == HELLA_SWAG: # Note: HellaSwag should run few-shot eval data_adapter = HellaSwagAdapter() elif dataset_name == TRUTHFUL_QA: data_adapter = TruthfulQaAdapter() # TODO: add more datasets here else: raise ValueError(f'Unknown dataset: {dataset_name}') # TODO: add mapping if dataset_name in {TRUTHFUL_QA, HELLA_SWAG} and not args.dry_run:
model_adapter = ContinuationLogitsModelAdapter(model_id=model_id,
8
2023-12-07 06:10:49+00:00
12k
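The record above exercises the continuation-logits path: each multiple-choice option is scored by summing the log-probabilities its tokens receive when appended to the shared context, and the highest-scoring option is picked. Below is a minimal standalone sketch of that scoring idea using Hugging Face transformers; the model name and the prompt/choices in the usage note are placeholders, not part of the record.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

def continuation_logprob(model, tokenizer, context: str, continuation: str) -> float:
    # Tokenize context alone and context+continuation, then isolate the continuation tokens.
    ctx_ids = tokenizer(context, return_tensors="pt").input_ids[0]
    whole_ids = tokenizer(context + continuation, return_tensors="pt").input_ids[0]
    cont_ids = whole_ids[len(ctx_ids):]
    # Feed everything except the last token; position i predicts token i+1.
    inputs = whole_ids[:-1].unsqueeze(0)
    with torch.no_grad():
        logits = model(inputs).logits                      # (1, seq_len - 1, vocab)
    log_probs = torch.log_softmax(logits.float(), dim=-1)
    # The last len(cont_ids) positions are the ones predicting the continuation tokens.
    cont_log_probs = log_probs[0, -len(cont_ids):, :]
    token_scores = cont_log_probs.gather(-1, cont_ids.unsqueeze(-1)).squeeze(-1)
    return float(token_scores.sum())

# Usage sketch: pick the best answer for one multiple-choice doc.
# tok = AutoTokenizer.from_pretrained("gpt2")
# lm = AutoModelForCausalLM.from_pretrained("gpt2")
# scores = [continuation_logprob(lm, tok, question, " " + choice) for choice in choices]
# pred = choices[int(torch.tensor(scores).argmax())]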
liujin112/PortraitDiffusion
main.py
[ { "identifier": "MasaCtrlPipeline", "path": "utils/pipeline.py", "snippet": "class MasaCtrlPipeline(StableDiffusionPipeline):\n\n def next_step(\n self,\n model_output: torch.FloatTensor,\n timestep: int,\n x: torch.FloatTensor,\n eta=0.,\n verbose=False\n ):\n \"\"\"\n Inverse sampling for DDIM Inversion\n \"\"\"\n if verbose:\n print(\"timestep: \", timestep)\n next_step = timestep\n timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999)\n alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod\n alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step]\n beta_prod_t = 1 - alpha_prod_t\n pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5\n pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output\n x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir\n return x_next, pred_x0\n\n def step(\n self,\n model_output: torch.FloatTensor,\n timestep: int,\n x: torch.FloatTensor,\n eta: float=0.0,\n verbose=False,\n ):\n \"\"\"\n predict the sampe the next step in the denoise process.\n \"\"\"\n prev_timestep = timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps\n alpha_prod_t = self.scheduler.alphas_cumprod[timestep]\n alpha_prod_t_prev = self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep > 0 else self.scheduler.final_alpha_cumprod\n beta_prod_t = 1 - alpha_prod_t\n pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5\n pred_dir = (1 - alpha_prod_t_prev)**0.5 * model_output\n x_prev = alpha_prod_t_prev**0.5 * pred_x0 + pred_dir\n return x_prev, pred_x0\n\n @torch.no_grad()\n def image2latent(self, image):\n DEVICE = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n if type(image) is Image:\n image = np.array(image)\n image = torch.from_numpy(image).float() / 127.5 - 1\n image = image.permute(2, 0, 1).unsqueeze(0).to(DEVICE)\n # input image density range [-1, 1]\n latents = self.vae.encode(image)['latent_dist'].mean\n latents = latents * 0.18215\n return latents\n\n @torch.no_grad()\n def latent2image(self, latents, return_type='np'):\n latents = 1 / 0.18215 * latents.detach()\n image = self.vae.decode(latents)['sample']\n if return_type == 'np':\n image = (image / 2 + 0.5).clamp(0, 1)\n image = image.cpu().permute(0, 2, 3, 1).numpy()[0]\n image = (image * 255).astype(np.uint8)\n elif return_type == \"pt\":\n image = (image / 2 + 0.5).clamp(0, 1)\n\n return image\n\n def latent2image_grad(self, latents):\n latents = 1 / 0.18215 * latents\n image = self.vae.decode(latents)['sample']\n\n return image # range [-1, 1]\n\n @torch.no_grad()\n def __call__(\n self,\n prompt,\n batch_size=1,\n height=512,\n width=512,\n num_inference_steps=50,\n guidance_scale=7.5,\n eta=0.0,\n latents=None,\n unconditioning=None,\n neg_prompt=None,\n ref_intermediate_latents=None,\n return_intermediates=False,\n lcm_lora=False,\n de_bug=False,\n **kwds):\n DEVICE = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n if isinstance(prompt, list):\n batch_size = len(prompt)\n elif isinstance(prompt, str):\n if batch_size > 1:\n prompt = [prompt] * batch_size\n\n # text embeddings\n text_input = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n\n text_embeddings = self.text_encoder(text_input.input_ids.to(DEVICE))[0]\n print(\"input text embeddings :\", text_embeddings.shape)\n # if kwds.get(\"dir\"):\n # dir = 
text_embeddings[-2] - text_embeddings[-1]\n # u, s, v = torch.pca_lowrank(dir.transpose(-1, -2), q=1, center=True)\n # text_embeddings[-1] = text_embeddings[-1] + kwds.get(\"dir\") * v\n # print(u.shape)\n # print(v.shape)\n\n # define initial latents\n latents_shape = (batch_size, self.unet.config.in_channels, height//8, width//8)\n if latents is None:\n latents = torch.randn(latents_shape, device=DEVICE)\n else:\n assert latents.shape == latents_shape, f\"The shape of input latent tensor {latents.shape} should equal to predefined one.\"\n\n # unconditional embedding for classifier free guidance\n if guidance_scale > 1.:\n max_length = text_input.input_ids.shape[-1]\n if neg_prompt:\n uc_text = neg_prompt\n else:\n uc_text = \"\"\n # uc_text = \"ugly, tiling, poorly drawn hands, poorly drawn feet, body out of frame, cut off, low contrast, underexposed, distorted face\"\n unconditional_input = self.tokenizer(\n [uc_text] * batch_size,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n # unconditional_input.input_ids = unconditional_input.input_ids[:, 1:]\n unconditional_embeddings = self.text_encoder(unconditional_input.input_ids.to(DEVICE))[0]\n text_embeddings = torch.cat([unconditional_embeddings, text_embeddings], dim=0)\n\n print(\"latents shape: \", latents.shape)\n # iterative sampling\n self.scheduler.set_timesteps(num_inference_steps)\n # print(\"Valid timesteps: \", reversed(self.scheduler.timesteps))\n latents_list = [latents]\n pred_x0_list = [latents]\n if de_bug:\n import pdb;pdb.set_trace()\n for i, t in enumerate(tqdm(self.scheduler.timesteps, desc=\"DDIM Sampler\")):\n if ref_intermediate_latents is not None:\n # note that the batch_size >= 2\n latents_ref = ref_intermediate_latents[-1 - i]\n _, latents_cur = latents.chunk(2)\n latents = torch.cat([latents_ref, latents_cur])\n\n if guidance_scale > 1.:\n model_inputs = torch.cat([latents] * 2)\n else:\n model_inputs = latents\n if unconditioning is not None and isinstance(unconditioning, list):\n _, text_embeddings = text_embeddings.chunk(2)\n text_embeddings = torch.cat([unconditioning[i].expand(*text_embeddings.shape), text_embeddings]) \n # predict tghe noise\n noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample\n if guidance_scale > 1.:\n noise_pred_uncon, noise_pred_con = noise_pred.chunk(2, dim=0)\n noise_pred = noise_pred_uncon + guidance_scale * (noise_pred_con - noise_pred_uncon)\n # compute the previous noise sample x_t -> x_t-1\n if lcm_lora:\n latents, pred_x0 = self.scheduler.step(noise_pred, t, latents, return_dict=False)\n else:\n latents, pred_x0 = self.step(noise_pred, t, latents)\n latents_list.append(latents)\n pred_x0_list.append(pred_x0)\n\n image = self.latent2image(latents, return_type=\"pt\")\n if return_intermediates:\n pred_x0_list = [self.latent2image(img, return_type=\"pt\") for img in pred_x0_list]\n latents_list = [self.latent2image(img, return_type=\"pt\") for img in latents_list]\n return image, pred_x0_list, latents_list\n return image\n\n @torch.no_grad()\n def invert(\n self,\n image: torch.Tensor,\n prompt,\n num_inference_steps=50,\n guidance_scale=7.5,\n eta=0.0,\n return_intermediates=False,\n **kwds):\n \"\"\"\n invert a real image into noise map with determinisc DDIM inversion\n \"\"\"\n DEVICE = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n batch_size = image.shape[0]\n if isinstance(prompt, list):\n if batch_size == 1:\n image = image.expand(len(prompt), -1, -1, -1)\n elif isinstance(prompt, 
str):\n if batch_size > 1:\n prompt = [prompt] * batch_size\n\n # text embeddings\n text_input = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n text_embeddings = self.text_encoder(text_input.input_ids.to(DEVICE))[0]\n print(\"input text embeddings :\", text_embeddings.shape)\n # define initial latents\n latents = self.image2latent(image)\n start_latents = latents\n # print(latents)\n # exit()\n # unconditional embedding for classifier free guidance\n if guidance_scale > 1.:\n max_length = text_input.input_ids.shape[-1]\n unconditional_input = self.tokenizer(\n [\"\"] * batch_size,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n unconditional_embeddings = self.text_encoder(unconditional_input.input_ids.to(DEVICE))[0]\n text_embeddings = torch.cat([unconditional_embeddings, text_embeddings], dim=0)\n\n print(\"latents shape: \", latents.shape)\n # interative sampling\n self.scheduler.set_timesteps(num_inference_steps)\n print(\"Valid timesteps: \", reversed(self.scheduler.timesteps))\n # print(\"attributes: \", self.scheduler.__dict__)\n latents_list = [latents]\n pred_x0_list = [latents]\n for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc=\"DDIM Inversion\")):\n if guidance_scale > 1.:\n model_inputs = torch.cat([latents] * 2)\n else:\n model_inputs = latents\n\n # predict the noise\n noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample\n if guidance_scale > 1.:\n noise_pred_uncon, noise_pred_con = noise_pred.chunk(2, dim=0)\n noise_pred = noise_pred_uncon + guidance_scale * (noise_pred_con - noise_pred_uncon)\n # compute the previous noise sample x_t-1 -> x_t\n latents, pred_x0 = self.next_step(noise_pred, t, latents)\n latents_list.append(latents)\n pred_x0_list.append(pred_x0)\n\n if return_intermediates:\n # return the intermediate laters during inversion\n # pred_x0_list = [self.latent2image(img, return_type=\"pt\") for img in pred_x0_list]\n return latents, latents_list\n return latents, start_latents" }, { "identifier": "AttentionBase", "path": "utils/masactrl_utils.py", "snippet": "class AttentionBase:\n def __init__(self):\n self.cur_step = 0\n self.num_att_layers = -1\n self.cur_att_layer = 0\n\n def after_step(self):\n pass\n\n def __call__(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs):\n out = self.forward(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs)\n self.cur_att_layer += 1\n if self.cur_att_layer == self.num_att_layers:\n self.cur_att_layer = 0\n self.cur_step += 1\n # after step\n self.after_step()\n return out\n\n def forward(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs):\n out = torch.einsum('b i j, b j d -> b i d', attn, v)\n out = rearrange(out, '(b h) n d -> b n (h d)', h=num_heads)\n return out\n\n def reset(self):\n self.cur_step = 0\n self.cur_att_layer = 0" }, { "identifier": "regiter_attention_editor_diffusers", "path": "utils/masactrl_utils.py", "snippet": "def regiter_attention_editor_diffusers(model, editor: AttentionBase):\n \"\"\"\n Register a attention editor to Diffuser Pipeline, refer from [Prompt-to-Prompt]\n \"\"\"\n def ca_forward(self, place_in_unet):\n def forward(x, encoder_hidden_states=None, attention_mask=None, context=None, mask=None):\n \"\"\"\n The attention is similar to the original implementation of LDM CrossAttention class\n except adding some modifications on the attention\n \"\"\"\n if encoder_hidden_states is not None:\n context = 
encoder_hidden_states\n if attention_mask is not None:\n mask = attention_mask\n\n to_out = self.to_out\n if isinstance(to_out, nn.modules.container.ModuleList):\n to_out = self.to_out[0]\n else:\n to_out = self.to_out\n\n h = self.heads\n q = self.to_q(x)\n is_cross = context is not None\n context = context if is_cross else x\n k = self.to_k(context)\n v = self.to_v(context)\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))\n\n sim = torch.einsum('b i d, b j d -> b i j', q, k) * self.scale\n\n if mask is not None:\n mask = rearrange(mask, 'b ... -> b (...)')\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, 'b j -> (b h) () j', h=h)\n mask = mask[:, None, :].repeat(h, 1, 1)\n sim.masked_fill_(~mask, max_neg_value)\n\n attn = sim.softmax(dim=-1)\n # the only difference\n out = editor(\n q, k, v, sim, attn, is_cross, place_in_unet,\n self.heads, scale=self.scale)\n\n return to_out(out)\n\n return forward\n\n def register_editor(net, count, place_in_unet):\n for name, subnet in net.named_children():\n if net.__class__.__name__ == 'Attention': # spatial Transformer layer\n net.forward = ca_forward(net, place_in_unet)\n return count + 1\n elif hasattr(net, 'children'):\n count = register_editor(subnet, count, place_in_unet)\n return count\n\n cross_att_count = 0\n for net_name, net in model.unet.named_children():\n if \"down\" in net_name:\n cross_att_count += register_editor(net, 0, \"down\")\n elif \"mid\" in net_name:\n cross_att_count += register_editor(net, 0, \"mid\")\n elif \"up\" in net_name:\n cross_att_count += register_editor(net, 0, \"up\")\n editor.num_att_layers = cross_att_count" }, { "identifier": "MaskPromptedStyleAttentionControl", "path": "utils/style_attn_control.py", "snippet": "class MaskPromptedStyleAttentionControl(AttentionBase):\n def __init__(self, start_step=4, start_layer=10, style_attn_step=35, layer_idx=None, step_idx=None, total_steps=50, style_guidance=0.1, \n only_masked_region=False, guidance=0.0, \n style_mask=None, source_mask=None, de_bug=False):\n \"\"\"\n MaskPromptedSAC\n Args:\n start_step: the step to start mutual self-attention control\n start_layer: the layer to start mutual self-attention control\n layer_idx: list of the layers to apply mutual self-attention control\n step_idx: list the steps to apply mutual self-attention control\n total_steps: the total number of steps\n thres: the thereshold for mask thresholding\n ref_token_idx: the token index list for cross-attention map aggregation\n cur_token_idx: the token index list for cross-attention map aggregation\n mask_save_dir: the path to save the mask image\n \"\"\"\n\n super().__init__()\n self.total_steps = total_steps\n self.total_layers = 16\n self.start_step = start_step\n self.start_layer = start_layer\n self.layer_idx = layer_idx if layer_idx is not None else list(range(start_layer, self.total_layers))\n self.step_idx = step_idx if step_idx is not None else list(range(start_step, total_steps))\n print(\"using MaskPromptStyleAttentionControl\")\n print(\"MaskedSAC at denoising steps: \", self.step_idx)\n print(\"MaskedSAC at U-Net layers: \", self.layer_idx)\n \n self.de_bug = de_bug\n self.style_guidance = style_guidance\n self.only_masked_region = only_masked_region\n self.style_attn_step = style_attn_step\n self.self_attns = []\n self.cross_attns = []\n self.guidance = guidance\n self.style_mask = style_mask\n self.source_mask = source_mask\n\n\n def after_step(self):\n self.self_attns = []\n self.cross_attns = []\n\n def attn_batch(self, 
q, k, v, sim, attn, is_cross, place_in_unet, num_heads, q_mask,k_mask, **kwargs):\n B = q.shape[0] // num_heads\n H = W = int(np.sqrt(q.shape[1]))\n q = rearrange(q, \"(b h) n d -> h (b n) d\", h=num_heads)\n k = rearrange(k, \"(b h) n d -> h (b n) d\", h=num_heads)\n v = rearrange(v, \"(b h) n d -> h (b n) d\", h=num_heads)\n\n sim = torch.einsum(\"h i d, h j d -> h i j\", q, k) * kwargs.get(\"scale\")\n \n if q_mask is not None:\n sim = sim.masked_fill(q_mask.unsqueeze(0)==0, -torch.finfo(sim.dtype).max)\n \n if k_mask is not None:\n sim = sim.masked_fill(k_mask.permute(1,0).unsqueeze(0)==0, -torch.finfo(sim.dtype).max)\n \n attn = sim.softmax(-1) if attn is None else attn\n\n if len(attn) == 2 * len(v):\n v = torch.cat([v] * 2)\n out = torch.einsum(\"h i j, h j d -> h i d\", attn, v)\n out = rearrange(out, \"(h1 h) (b n) d -> (h1 b) n (h d)\", b=B, h=num_heads)\n return out\n \n def attn_batch_fg_bg(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, q_mask,k_mask, **kwargs):\n B = q.shape[0] // num_heads\n H = W = int(np.sqrt(q.shape[1]))\n q = rearrange(q, \"(b h) n d -> h (b n) d\", h=num_heads)\n k = rearrange(k, \"(b h) n d -> h (b n) d\", h=num_heads)\n v = rearrange(v, \"(b h) n d -> h (b n) d\", h=num_heads)\n sim = torch.einsum(\"h i d, h j d -> h i j\", q, k) * kwargs.get(\"scale\")\n if q_mask is not None:\n sim_fg = sim.masked_fill(q_mask.unsqueeze(0)==0, -torch.finfo(sim.dtype).max)\n sim_bg = sim.masked_fill(q_mask.unsqueeze(0)==1, -torch.finfo(sim.dtype).max)\n if k_mask is not None:\n sim_fg = sim.masked_fill(k_mask.permute(1,0).unsqueeze(0)==0, -torch.finfo(sim.dtype).max)\n sim_bg = sim.masked_fill(k_mask.permute(1,0).unsqueeze(0)==1, -torch.finfo(sim.dtype).max)\n sim = torch.cat([sim_fg, sim_bg])\n attn = sim.softmax(-1)\n\n if len(attn) == 2 * len(v):\n v = torch.cat([v] * 2)\n out = torch.einsum(\"h i j, h j d -> h i d\", attn, v)\n out = rearrange(out, \"(h1 h) (b n) d -> (h1 b) n (h d)\", b=B, h=num_heads)\n return out\n \n def forward(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs):\n\n \"\"\"\n Attention forward function\n \"\"\"\n \n if is_cross or self.cur_step not in self.step_idx or self.cur_att_layer // 2 not in self.layer_idx:\n return super().forward(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs)\n\n B = q.shape[0] // num_heads // 2\n H = W = int(np.sqrt(q.shape[1]))\n \n if self.style_mask is not None and self.source_mask is not None:\n #mask = self.aggregate_cross_attn_map(idx=self.cur_token_idx) # (4, H, W)\n heigh, width = self.style_mask.shape[-2:]\n mask_style = self.style_mask# (H, W)\n mask_source = self.source_mask# (H, W)\n scale = int(np.sqrt(heigh * width / q.shape[1]))\n # res = int(np.sqrt(q.shape[1]))\n spatial_mask_source = F.interpolate(mask_source, (heigh//scale, width//scale)).reshape(-1, 1)\n spatial_mask_style = F.interpolate(mask_style, (heigh//scale, width//scale)).reshape(-1, 1)\n \n else:\n spatial_mask_source=None\n spatial_mask_style=None\n\n if spatial_mask_style is None or spatial_mask_source is None:\n \n out_s,out_c,out_t = self.style_attn_ctrl(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, spatial_mask_source,spatial_mask_style,**kwargs)\n \n else:\n if self.only_masked_region:\n out_s,out_c,out_t = self.mask_prompted_style_attn_ctrl(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, spatial_mask_source,spatial_mask_style,**kwargs)\n else:\n out_s,out_c,out_t = self.separate_mask_prompted_style_attn_ctrl(q, k, v, sim, attn, is_cross, place_in_unet, 
num_heads, spatial_mask_source,spatial_mask_style,**kwargs)\n\n out = torch.cat([out_s,out_c,out_t],dim=0) \n return out\n \n\n def style_attn_ctrl(self,q,k,v,sim,attn,is_cross,place_in_unet,num_heads,spatial_mask_source,spatial_mask_style,**kwargs):\n if self.de_bug:\n import pdb; pdb.set_trace()\n \n qs, qc, qt = q.chunk(3)\n\n out_s = self.attn_batch(qs, k[:num_heads], v[:num_heads], sim[:num_heads], attn[:num_heads], is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n out_c = self.attn_batch(qc, k[:num_heads], v[:num_heads], sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n\n if self.cur_step < self.style_attn_step:\n out_t = self.attn_batch(qc, k[:num_heads], v[:num_heads], sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n else:\n out_t = self.attn_batch(qt, k[:num_heads], v[:num_heads], sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n if self.style_guidance>=0:\n out_t = out_c + (out_t - out_c) * self.style_guidance\n return out_s,out_c,out_t\n\n def mask_prompted_style_attn_ctrl(self,q,k,v,sim,attn,is_cross,place_in_unet,num_heads,spatial_mask_source,spatial_mask_style,**kwargs):\n qs, qc, qt = q.chunk(3)\n \n out_s = self.attn_batch(qs, k[:num_heads], v[:num_heads], sim[:num_heads], attn[:num_heads], is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n out_c = self.attn_batch(qc, k[num_heads: 2*num_heads], v[num_heads:2*num_heads], sim[num_heads: 2*num_heads], attn[num_heads: 2*num_heads], is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None, **kwargs)\n out_c_new = self.attn_batch(qc, k[num_heads: 2*num_heads], v[num_heads:2*num_heads], sim[num_heads: 2*num_heads], None, is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None, **kwargs)\n \n if self.de_bug:\n import pdb; pdb.set_trace()\n\n if self.cur_step < self.style_attn_step:\n out_t = out_c #self.attn_batch(qc, k[:num_heads], v[:num_heads], sim[:num_heads], attn, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n else:\n out_t_fg = self.attn_batch(qt, k[:num_heads], v[:num_heads], sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n out_c_fg = self.attn_batch(qc, k[:num_heads], v[:num_heads], sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n if self.style_guidance>=0:\n out_t = out_c_fg + (out_t_fg - out_c_fg) * self.style_guidance \n \n out_t = out_t * spatial_mask_source + out_c * (1 - spatial_mask_source)\n\n if self.de_bug:\n import pdb; pdb.set_trace()\n \n # print(torch.sum(out_t* (1 - spatial_mask_source) - out_c * (1 - spatial_mask_source)))\n return out_s,out_c,out_t\n\n def separate_mask_prompted_style_attn_ctrl(self,q,k,v,sim,attn,is_cross,place_in_unet,num_heads,spatial_mask_source,spatial_mask_style,**kwargs):\n \n if self.de_bug:\n import pdb; pdb.set_trace()\n # To prevent query confusion, render fg and bg according to mask.\n qs, qc, qt = q.chunk(3)\n out_s = self.attn_batch(qs, k[:num_heads], v[:num_heads], sim[:num_heads], attn[:num_heads], is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n if self.cur_step < self.style_attn_step: \n \n out_c = self.attn_batch_fg_bg(qc, k[:num_heads], v[:num_heads], sim[:num_heads], attn, is_cross, place_in_unet, num_heads, 
q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n out_c_fg,out_c_bg = out_c.chunk(2)\n out_t = out_c_fg * spatial_mask_source + out_c_bg * (1 - spatial_mask_source)\n\n else:\n out_t = self.attn_batch_fg_bg(qt, k[:num_heads], v[:num_heads], sim[:num_heads], attn, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n out_c = self.attn_batch_fg_bg(qc, k[:num_heads], v[:num_heads], sim[:num_heads], attn, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n out_t_fg,out_t_bg = out_t.chunk(2)\n out_c_fg,out_c_bg = out_c.chunk(2)\n if self.style_guidance>=0:\n out_t_fg = out_c_fg + (out_t_fg - out_c_fg) * self.style_guidance \n out_t_bg = out_c_bg + (out_t_bg - out_c_bg) * self.style_guidance \n out_t = out_t_fg * spatial_mask_source + out_t_bg * (1 - spatial_mask_source)\n \n return out_s,out_t,out_t" } ]
import os import sys import pandas as pd import torch import torch.nn as nn import torch.nn.functional as F import argparse import numpy as np from tqdm import tqdm from diffusers import DDIMScheduler,LCMScheduler from torchvision.utils import save_image from torchvision.io import read_image from PIL import Image from utils.pipeline import MasaCtrlPipeline from utils.masactrl_utils import AttentionBase, regiter_attention_editor_diffusers from utils.style_attn_control import MaskPromptedStyleAttentionControl
8,005
def load_image(image_path, res, device, gray=False): image = Image.open(image_path).convert('RGB') if not gray else Image.open(image_path).convert('L') image = torch.tensor(np.array(image)).float() if gray: image = image.unsqueeze(-1).repeat(1,1,3) image = image.permute(2, 0, 1) image = image[:3].unsqueeze_(0).float() / 127.5 - 1. # [-1, 1] image = F.interpolate(image, (res, res)) image = image.to(device) return image def load_mask(image_path, res, device): if image_path != '': image = Image.open(image_path).convert('RGB') image = torch.tensor(np.array(image)).float() image = image.permute(2, 0, 1) image = image[:3].unsqueeze_(0).float() / 127.5 - 1. # [-1, 1] image = F.interpolate(image, (res, res)) image = image.to(device) image = image[:, :1, :, :] else: return None return image def main(): args = argparse.ArgumentParser() args.add_argument("--step", type=int, default=0) args.add_argument("--layer", type=int, default=10) args.add_argument("--res", type=int, default=512) args.add_argument("--style_guidance", type=float, default=1.5) args.add_argument("--content", type=str, default=None) args.add_argument("--style", type=str, default=None) args.add_argument("--content_mask", type=str, default='') args.add_argument("--style_mask", type=str, default='') args.add_argument("--output", type=str, default='./results/') args.add_argument("--only_mask_region", action="store_true") args.add_argument("--model_path", type=str, default='runwayml/stable-diffusion-v1-5') args.add_argument("--SAC_step", type=int, default=35) args.add_argument("--num_inference_steps", type=int, default=50) args.add_argument("--LCM_lora", action="store_true") args = args.parse_args() STEP = args.step LAYPER = args.layer only_mask_region = args.only_mask_region out_dir = args.output style_guidance = args.style_guidance num_inference_steps = args.num_inference_steps SAC_step = args.SAC_step device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") Guidance_scale = 0.0 model_path = args.model_path model = MasaCtrlPipeline.from_pretrained(model_path).to(device) if args.LCM_lora: model.scheduler = LCMScheduler.from_config(model.scheduler.config) # load LCM-LoRA model.load_lora_weights("latent-consistency/lcm-lora-sdv1-5") else: model.scheduler = DDIMScheduler.from_config(model.scheduler.config) source_image = load_image(args.content, args.res, device) style_image = load_image(args.style, args.res, device) style_mask = load_mask(args.style_mask, res=64, device=device) source_mask = load_mask(args.content_mask, res=args.res, device=device) with torch.no_grad(): style_content = torch.cat([style_image, source_image], dim=0) source_prompt = ['head', 'head'] prompts = source_prompt + ['head']
def load_image(image_path, res, device, gray=False): image = Image.open(image_path).convert('RGB') if not gray else Image.open(image_path).convert('L') image = torch.tensor(np.array(image)).float() if gray: image = image.unsqueeze(-1).repeat(1,1,3) image = image.permute(2, 0, 1) image = image[:3].unsqueeze_(0).float() / 127.5 - 1. # [-1, 1] image = F.interpolate(image, (res, res)) image = image.to(device) return image def load_mask(image_path, res, device): if image_path != '': image = Image.open(image_path).convert('RGB') image = torch.tensor(np.array(image)).float() image = image.permute(2, 0, 1) image = image[:3].unsqueeze_(0).float() / 127.5 - 1. # [-1, 1] image = F.interpolate(image, (res, res)) image = image.to(device) image = image[:, :1, :, :] else: return None return image def main(): args = argparse.ArgumentParser() args.add_argument("--step", type=int, default=0) args.add_argument("--layer", type=int, default=10) args.add_argument("--res", type=int, default=512) args.add_argument("--style_guidance", type=float, default=1.5) args.add_argument("--content", type=str, default=None) args.add_argument("--style", type=str, default=None) args.add_argument("--content_mask", type=str, default='') args.add_argument("--style_mask", type=str, default='') args.add_argument("--output", type=str, default='./results/') args.add_argument("--only_mask_region", action="store_true") args.add_argument("--model_path", type=str, default='runwayml/stable-diffusion-v1-5') args.add_argument("--SAC_step", type=int, default=35) args.add_argument("--num_inference_steps", type=int, default=50) args.add_argument("--LCM_lora", action="store_true") args = args.parse_args() STEP = args.step LAYPER = args.layer only_mask_region = args.only_mask_region out_dir = args.output style_guidance = args.style_guidance num_inference_steps = args.num_inference_steps SAC_step = args.SAC_step device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") Guidance_scale = 0.0 model_path = args.model_path model = MasaCtrlPipeline.from_pretrained(model_path).to(device) if args.LCM_lora: model.scheduler = LCMScheduler.from_config(model.scheduler.config) # load LCM-LoRA model.load_lora_weights("latent-consistency/lcm-lora-sdv1-5") else: model.scheduler = DDIMScheduler.from_config(model.scheduler.config) source_image = load_image(args.content, args.res, device) style_image = load_image(args.style, args.res, device) style_mask = load_mask(args.style_mask, res=64, device=device) source_mask = load_mask(args.content_mask, res=args.res, device=device) with torch.no_grad(): style_content = torch.cat([style_image, source_image], dim=0) source_prompt = ['head', 'head'] prompts = source_prompt + ['head']
editor = AttentionBase()
1
2023-12-06 01:18:39+00:00
12k
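The completion target of this record (editor = AttentionBase()) is the point where an attention editor is attached to the pipeline before inversion. A minimal sketch of how the pieces shown in this record are typically wired together, assuming the repository's utils modules are importable; the model path, prompts, and hyper-parameters below are placeholders, not the repository's exact main().

import torch
from diffusers import DDIMScheduler
from utils.pipeline import MasaCtrlPipeline
from utils.masactrl_utils import AttentionBase, regiter_attention_editor_diffusers
from utils.style_attn_control import MaskPromptedStyleAttentionControl

device = "cuda" if torch.cuda.is_available() else "cpu"
model = MasaCtrlPipeline.from_pretrained("runwayml/stable-diffusion-v1-5").to(device)
model.scheduler = DDIMScheduler.from_config(model.scheduler.config)

# A plain editor is registered for the DDIM inversion pass ...
editor = AttentionBase()
regiter_attention_editor_diffusers(model, editor)
# inv_latents, _ = model.invert(style_content, source_prompt, num_inference_steps=50, guidance_scale=0.0)

# ... and the mask-prompted style controller is registered for the stylised sampling pass.
controller = MaskPromptedStyleAttentionControl(
    start_step=0, start_layer=10, style_attn_step=35,
    style_guidance=1.5, style_mask=None, source_mask=None,
)
regiter_attention_editor_diffusers(model, controller)
# images = model(prompts, latents=inv_latents, guidance_scale=0.0, num_inference_steps=50)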
MarilynKeller/aitviewer-skel
aitviewer/streamables/webcam.py
[ { "identifier": "Node", "path": "aitviewer/scene/node.py", "snippet": "class Node(object):\n \"\"\"Interface for nodes.\"\"\"\n\n def __init__(\n self,\n name=None,\n icon=None,\n position=None,\n rotation=None,\n scale=1.0,\n color=(0.5, 0.5, 0.5, 1.0),\n material=None,\n is_selectable=True,\n gui_affine=True,\n gui_material=True,\n enabled_frames=None,\n n_frames=1,\n ):\n \"\"\"\n :param name: Name of the node\n :param icon: Custom Node Icon using custom Icon font\n :param position: Starting position in the format (X,Y,Z) or np array of positions with shape (F, 3)\n :param rotation: Starting rotation in rotation matrix representation (3,3) or np array of rotations with shape (F, 3, 3)\n :param scale: Starting scale (scalar) or np array of scale values with shape (F)\n :param color: (R,G,B,A) 0-1 formatted color value.\n :param material: Object material properties. The color specified in the material will override node color\n :param is_selectable: If True the node is selectable when clicked on, otherwise the parent node will be selected.\n :param gui_affine: If True the node will have transform controls (position, rotation, scale) in the GUI.\n :param gui_material: If True the node will have material controls in the GUI.\n :param enabled_frames: Numpy array of boolean values, the object will be enabled only in frames where the value is True,\n the number of ones in the mask must match the number of frames of the object.\n :param n_frames: How many frames this renderable has.\n \"\"\"\n # Transform & Animation\n position = np.zeros(3, dtype=np.float32) if position is None else np.array(position, dtype=np.float32)\n rotation = np.eye(3, dtype=np.float32) if rotation is None else np.array(rotation, dtype=np.float32)\n\n self._positions = position if len(position.shape) != 1 else position[np.newaxis]\n self._rotations = rotation if len(rotation.shape) != 2 else rotation[np.newaxis]\n self._scales = (scale if isinstance(scale, np.ndarray) else np.array([scale])).astype(np.float32)\n\n n_positions = self._positions.shape[0]\n n_rotations = self._rotations.shape[0]\n n_scales = self._scales.shape[0]\n\n if n_frames > 1:\n assert n_positions == 1 or n_frames == n_positions, (\n f\"Number of position frames\" f\" ({n_positions}) must be 1 or match number of Node frames {n_frames}\"\n )\n assert n_rotations == 1 or n_frames == n_rotations, (\n f\"Number of rotations frames\" f\" ({n_rotations}) must be 1 or match number of Node frames {n_frames}\"\n )\n assert n_scales == 1 or n_frames == n_scales, (\n f\"Number of scales frames\" f\" ({n_scales}) must be 1 or match number of Node frames {n_frames}\"\n )\n else:\n n_frames = max(n_positions, n_rotations, n_scales)\n assert (\n (n_positions == 1 or n_positions == n_frames)\n and (n_rotations == 1 or n_rotations == n_frames)\n and (n_scales == 1 or n_scales == n_frames)\n ), (\n f\"Number of position\"\n f\"({n_positions}), rotation ({n_rotations}) and scale ({n_scales})\"\n \"frames must be 1 or match.\"\n )\n\n # Frames\n self._n_frames = n_frames\n self._current_frame_id = 0\n self.model_matrix = self.get_local_transform()\n self._enabled_frames = enabled_frames\n if self._enabled_frames is not None:\n assert np.count_nonzero(self._enabled_frames) == n_frames, (\n f\"Number of non-zero elements in enabled_frames\"\n f\" ({np.count_nonzero(self._enabled_frames)}) must match number of frames in sequence ({n_frames})\"\n )\n # Create an array that maps from the true frame id (counting also disabled frames) to the index of the\n # first existing 
frame in the sequence.\n self._enabled_frame_id = np.cumsum(self._enabled_frames) - 1\n\n # Stores the true frame id (counting also disabled frames) we use this to allow going\n # through both enabled and disabled frames from the GUI.\n self._internal_frame_id = 0\n\n # Material\n self.material = Material(color=color) if material is None else material\n\n # Renderable Attributes\n self.is_renderable = False\n self.backface_culling = True\n self.backface_fragmap = False\n self.draw_outline = False\n\n # Flags to enable rendering passes\n self.cast_shadow = False\n self.depth_prepass = False\n self.fragmap = False\n self.outline = False\n\n # Programs for render passes. Subclasses are responsible for setting these.\n self.depth_only_program = None # Required for depth_prepass and cast_shadow passes\n self.fragmap_program = None # Required for fragmap pass\n self.outline_program = None # Required for outline pass\n\n # GUI\n self.name = name if name is not None else type(self).__name__\n self.uid = C.next_gui_id()\n self.unique_name = self.name + \"{}\".format(self.uid)\n self.icon = icon if icon is not None else \"\\u0082\"\n self._enabled = True\n self._expanded = False\n self.gui_controls = {\n \"affine\": {\n \"fn\": self.gui_affine,\n \"icon\": \"\\u009b\",\n \"is_visible\": gui_affine,\n },\n \"material\": {\n \"fn\": self.gui_material,\n \"icon\": \"\\u0088\",\n \"is_visible\": gui_material,\n },\n \"animation\": {\n \"fn\": self.gui_animation,\n \"icon\": \"\\u0098\",\n \"is_visible\": (lambda: self._n_frames > 1)(),\n },\n \"io\": {\n \"fn\": self.gui_io,\n \"icon\": \"\\u009a\",\n \"is_visible\": (lambda: self.gui_io.__func__ is not Node.gui_io)(),\n },\n }\n self.gui_modes = {\"view\": {\"title\": \" View\", \"fn\": self.gui_mode_view, \"icon\": \"\\u0099\"}}\n self._selected_mode = \"view\"\n self._show_in_hierarchy = True\n self.is_selectable = is_selectable\n self.export_usd_enabled = True\n self.export_usd_expanded = True\n\n self.nodes: List[Node] = []\n self.parent: Node = None\n\n # Selected Mode\n @property\n def selected_mode(self):\n return self._selected_mode\n\n @selected_mode.setter\n def selected_mode(self, selected_mode):\n self._selected_mode = selected_mode\n\n # Transform\n @property\n def position(self):\n idx = self.current_frame_id if self._positions.shape[0] > 1 else 0\n return self._positions[idx]\n\n @position.setter\n def position(self, position):\n idx = self.current_frame_id if self._positions.shape[0] > 1 else 0\n self._positions[idx] = np.array(position, dtype=np.float32).copy()\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def positions(self):\n return self._positions\n\n @positions.setter\n def positions(self, positions):\n self._positions = positions\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def rotation(self):\n idx = self.current_frame_id if self._rotations.shape[0] > 1 else 0\n return self._rotations[idx]\n\n @rotation.setter\n def rotation(self, rotation):\n idx = self.current_frame_id if self._rotations.shape[0] > 1 else 0\n self._rotations[idx] = rotation\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def rotations(self):\n return self._rotations\n\n @rotations.setter\n def rotations(self, rotations):\n self._rotations = rotations\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def scale(self):\n idx = self.current_frame_id if 
self._scales.shape[0] > 1 else 0\n return self._scales[idx]\n\n @scale.setter\n def scale(self, scale):\n idx = self.current_frame_id if self._scales.shape[0] > 1 else 0\n self._scales[idx] = scale\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def scales(self):\n return self._scales\n\n @scales.setter\n def scales(self, scales):\n self._scales = scales\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @staticmethod\n @lru_cache()\n def _compute_transform(pos, rot, scale):\n rotation = np.eye(4)\n rotation[:3, :3] = np.array(rot)\n\n trans = np.eye(4)\n trans[:3, 3] = np.array(pos)\n\n scale = np.diag([scale, scale, scale, 1])\n\n return (trans @ rotation @ scale).astype(\"f4\")\n\n def get_local_transform(self):\n \"\"\"Construct local transform as a 4x4 matrix from this node's position, orientation and scale.\"\"\"\n return self._compute_transform(tuple(self.position), tuple(map(tuple, self.rotation)), self.scale)\n\n def update_transform(self, parent_transform=None):\n \"\"\"Update the model matrix of this node and all of its descendants.\"\"\"\n if parent_transform is None:\n self.model_matrix = self.get_local_transform()\n else:\n self.model_matrix = parent_transform.astype(\"f4\") @ self.get_local_transform()\n\n for n in self.nodes:\n n.update_transform(self.model_matrix)\n\n @property\n def color(self):\n return self.material.color\n\n @color.setter\n def color(self, color):\n self.material.color = color\n\n @property\n def bounds(self):\n \"\"\"The bounds in the format ((x_min, x_max), (y_min, y_max), (z_min, z_max))\"\"\"\n return np.array([[0, 0], [0, 0], [0, 0]])\n\n @property\n def current_bounds(self):\n return np.array([[0, 0], [0, 0], [0, 0]])\n\n @property\n def current_center(self):\n return self.current_bounds.mean(-1)\n\n @property\n def center(self):\n return self.bounds.mean(-1)\n\n def get_local_bounds(self, points):\n if len(points.shape) == 2 and points.shape[-1] == 3:\n points = points[np.newaxis]\n assert len(points.shape) == 3\n\n # Compute min and max coordinates of the bounding box ignoring NaNs.\n val = np.array(\n [\n [np.nanmin(points[:, :, 0]), np.nanmax(points[:, :, 0])],\n [np.nanmin(points[:, :, 1]), np.nanmax(points[:, :, 1])],\n [np.nanmin(points[:, :, 2]), np.nanmax(points[:, :, 2])],\n ]\n )\n\n # If any of the elements is NaN return an empty bounding box.\n if np.isnan(val).any():\n return np.array([[0, 0], [0, 0], [0, 0]])\n else:\n return val\n\n def get_bounds(self, points):\n val = self.get_local_bounds(points)\n\n # Transform bounding box with the model matrix.\n val = (self.model_matrix @ np.vstack((val, np.array([1.0, 1.0]))))[:3]\n\n # If any of the elements is NaN return an empty bounding box.\n if np.isnan(val).any():\n return np.array([[0, 0], [0, 0], [0, 0]])\n else:\n return val\n\n @property\n def n_frames(self):\n return self._n_frames\n\n @n_frames.setter\n def n_frames(self, n_frames):\n self._n_frames = n_frames\n\n def __len__(self):\n return self.n_frames\n\n @property\n def current_frame_id(self):\n return self._current_frame_id\n\n @current_frame_id.setter\n def current_frame_id(self, frame_id):\n # Check if the frame changed.\n last_frame_id = self._current_frame_id if self._enabled_frames is None else self._internal_frame_id\n if self.n_frames == 1 or frame_id == last_frame_id:\n return\n\n self.on_before_frame_update()\n if self._enabled_frames is None:\n if frame_id < 0:\n self._current_frame_id = 0\n elif frame_id >= len(self):\n 
self._current_frame_id = len(self) - 1\n else:\n self._current_frame_id = frame_id\n else:\n # If an enabled_frames is present use it to get the current frame.\n if frame_id < 0:\n self._internal_frame_id = 0\n elif frame_id >= self._enabled_frames.shape[0]:\n self._internal_frame_id = self._enabled_frames.shape[0] - 1\n else:\n self._internal_frame_id = frame_id\n self._current_frame_id = self._enabled_frame_id[self._internal_frame_id]\n # Update enabled using the mask.\n self.enabled = self._enabled_frames[self._internal_frame_id]\n\n # Update frame id of all children nodes.\n for n in self.nodes:\n n.current_frame_id = self._current_frame_id\n\n self.on_frame_update()\n if self.parent and (self._positions.shape[0] > 1 or self._rotations.shape[0] > 1 or self._scales.shape[0] > 1):\n self.update_transform(self.parent.model_matrix)\n\n def next_frame(self):\n self.current_frame_id = self.current_frame_id + 1 if self.current_frame_id < len(self) - 1 else 0\n\n def previous_frame(self):\n self.current_frame_id = self.current_frame_id - 1 if self.current_frame_id > 0 else len(self) - 1\n\n def on_before_frame_update(self):\n \"\"\"Called when the current frame is about to change, 'self.current_frame_id' still has the id of the\n previous frame.\"\"\"\n pass\n\n def on_frame_update(self):\n \"\"\"Called when the current frame is changed.\"\"\"\n pass\n\n def add(self, *nodes, **kwargs):\n self._add_nodes(*nodes, **kwargs)\n\n def _add_node(self, n: \"Node\", show_in_hierarchy=True, expanded=False, enabled=True):\n \"\"\"\n Add a single node\n :param show_in_hierarchy: Whether to show the node in the scene hierarchy.\n :param expanded: Whether the node is initially expanded in the GUI.\n \"\"\"\n if n is None:\n return\n n._show_in_hierarchy = show_in_hierarchy\n n._expanded = expanded\n n._enabled = enabled if n._enabled_frames is None else n._enabled_frames[n.current_frame_id]\n self.nodes.append(n)\n n.parent = self\n n.update_transform(self.model_matrix)\n\n def _add_nodes(self, *nodes, **kwargs):\n \"\"\"Add multiple nodes\"\"\"\n for n in nodes:\n self._add_node(n, **kwargs)\n\n def remove(self, *nodes):\n for n in nodes:\n n.release()\n try:\n self.nodes.remove(n)\n except:\n pass\n\n @property\n def show_in_hierarchy(self):\n return self._show_in_hierarchy\n\n @property\n def enabled(self):\n return self._enabled\n\n @enabled.setter\n def enabled(self, enabled):\n self._enabled = enabled\n\n @property\n def expanded(self):\n return self._expanded\n\n @expanded.setter\n def expanded(self, expanded):\n self._expanded = expanded\n\n def is_transparent(self):\n \"\"\"\n Returns true if the object is transparent and should thus be sorted when rendering.\n Subclassess that use a different color should implement this method to be rendered correctly when transparent.\n \"\"\"\n return self.material.color[3] < 1.0\n\n def gui(self, imgui):\n \"\"\"\n Render GUI for custom node properties and controls. 
Implementation optional.\n Elements rendered here will show up in the scene hierarchy\n :param imgui: imgui context.\n See https://pyimgui.readthedocs.io/en/latest/reference/imgui.core.html for available elements to render\n \"\"\"\n pass\n\n def gui_modes(self, imgui):\n \"\"\"Render GUI with toolbar (tools) for this particular node\"\"\"\n\n def gui_animation(self, imgui):\n \"\"\"Render GUI for animation related settings\"\"\"\n\n if self._enabled_frames is None:\n if self.n_frames > 1:\n u, fid = imgui.slider_int(\n \"Frame##r_{}\".format(self.unique_name),\n self.current_frame_id,\n min_value=0,\n max_value=self.n_frames - 1,\n )\n if u:\n self.current_frame_id = fid\n else:\n u, fid = imgui.slider_int(\n \"Frame##r_{}\".format(self.unique_name),\n self._internal_frame_id,\n min_value=0,\n max_value=self._enabled_frames.shape[0] - 1,\n )\n if u:\n self.current_frame_id = fid\n\n def gui_affine(self, imgui):\n \"\"\"Render GUI for affine transformations\"\"\"\n # Position controls\n up, pos = imgui.drag_float3(\n \"Position##pos{}\".format(self.unique_name),\n *self.position,\n 1e-2,\n format=\"%.2f\",\n )\n if up:\n self.position = pos\n\n # Rotation controls\n euler_angles = rot2euler_numpy(self.rotation[np.newaxis], degrees=True)[0]\n ur, euler_angles = imgui.drag_float3(\n \"Rotation##pos{}\".format(self.unique_name),\n *euler_angles,\n 1e-2,\n format=\"%.2f\",\n )\n if ur:\n self.rotation = euler2rot_numpy(np.array(euler_angles)[np.newaxis], degrees=True)[0]\n\n # Scale controls\n us, scale = imgui.drag_float(\n \"Scale##scale{}\".format(self.unique_name),\n self.scale,\n 1e-2,\n min_value=0.001,\n max_value=100.0,\n format=\"%.3f\",\n )\n if us:\n self.scale = scale\n\n def gui_material(self, imgui):\n \"\"\"Render GUI with material properties\"\"\"\n\n # Color Control\n uc, color = imgui.color_edit4(\"Color##color{}'\".format(self.unique_name), *self.material.color)\n if uc:\n self.color = color\n\n # Diffuse\n ud, diffuse = imgui.slider_float(\n \"Diffuse##diffuse{}\".format(self.unique_name),\n self.material.diffuse,\n 0.0,\n 1.0,\n \"%.2f\",\n )\n if ud:\n self.material.diffuse = diffuse\n\n # Ambient\n ua, ambient = imgui.slider_float(\n \"Ambient##ambient{}\".format(self.unique_name),\n self.material.ambient,\n 0.0,\n 1.0,\n \"%.2f\",\n )\n if ua:\n self.material.ambient = ambient\n\n def gui_io(self, imgui):\n \"\"\"Render GUI for import/export\"\"\"\n pass\n\n def gui_mode_view(self, imgui):\n \"\"\"Render custom GUI for view mode\"\"\"\n pass\n\n def gui_context_menu(self, imgui, x: int, y: int):\n _, self.enabled = imgui.checkbox(\"Enabled\", self.enabled)\n if any([n._show_in_hierarchy for n in self.nodes]):\n imgui.spacing()\n imgui.separator()\n imgui.spacing()\n for n in self.nodes:\n if not n._show_in_hierarchy:\n continue\n if imgui.begin_menu(f\"{n.name}##{n.uid}\"):\n n.gui_context_menu(imgui, x, y)\n imgui.end_menu()\n\n # Renderable\n @staticmethod\n def once(func):\n def _decorator(self, *args, **kwargs):\n if self.is_renderable:\n return\n else:\n func(self, *args, **kwargs)\n self.is_renderable = True\n\n return _decorator\n\n def make_renderable(self, ctx):\n \"\"\"\n Prepares this object for rendering. 
This function must be called before `render` is used.\n :param ctx: The moderngl context.\n \"\"\"\n pass\n\n def render(self, camera, position=None, rotation=None, **kwargs):\n \"\"\"Render the current frame in this sequence.\"\"\"\n pass\n\n def render_positions(self, prog):\n \"\"\"\n Render with a VAO with only positions bound, used for shadow mapping, fragmap and depth prepass.\n \"\"\"\n pass\n\n def redraw(self, **kwargs):\n \"\"\"Perform update and redraw operations. Push to the GPU when finished. Recursively redraw child nodes\"\"\"\n for n in self.nodes:\n n.redraw(**kwargs)\n\n def set_camera_matrices(self, prog, camera, **kwargs):\n \"\"\"Set the model view projection matrix in the given program.\"\"\"\n # Transpose because np is row-major but OpenGL expects column-major.\n prog[\"model_matrix\"].write(self.model_matrix.T.astype(\"f4\").tobytes())\n prog[\"view_projection_matrix\"].write(camera.get_view_projection_matrix().T.astype(\"f4\").tobytes())\n\n def receive_shadow(self, program, **kwargs):\n \"\"\"\n Call this function if the renderable is to receive shadows.\n :param program: The shader program that can shade with shadows.\n :param kwargs: The render kwargs.\n \"\"\"\n if kwargs.get(\"shadows_enabled\", False):\n lights = kwargs[\"lights\"]\n\n for i, light in enumerate(lights):\n if light.shadow_enabled and light.shadow_map:\n light_matrix = light.mvp() @ self.model_matrix\n program[f\"dirLights[{i}].matrix\"].write(light_matrix.T.tobytes())\n\n # Bind shadowmap to slot i + 1, we reserve slot 0 for the mesh texture\n # and use slots 1 to (#lights + 1) for shadow maps\n light.shadow_map.use(location=i + 1)\n\n # Set sampler uniforms\n uniform = program[f\"shadow_maps\"]\n uniform.value = 1 if uniform.array_length == 1 else [*range(1, len(lights) + 1)]\n\n def render_shadowmap(self, light_matrix):\n if not self.cast_shadow or self.depth_only_program is None or self.color[3] == 0.0:\n return\n\n prog = self.depth_only_program\n prog[\"model_matrix\"].write(self.model_matrix.T.tobytes())\n prog[\"view_projection_matrix\"].write(light_matrix.T.tobytes())\n\n self.render_positions(prog)\n\n def render_fragmap(self, ctx, camera, uid=None):\n if not self.fragmap or self.fragmap_program is None:\n return\n\n # Transpose because np is row-major but OpenGL expects column-major.\n prog = self.fragmap_program\n self.set_camera_matrices(prog, camera)\n\n # Render with the specified object uid, if None use the node uid instead.\n prog[\"obj_id\"] = uid or self.uid\n\n if self.backface_culling or self.backface_fragmap:\n ctx.enable(moderngl.CULL_FACE)\n else:\n ctx.disable(moderngl.CULL_FACE)\n\n # If backface_fragmap is enabled for this node only render backfaces\n if self.backface_fragmap:\n ctx.cull_face = \"front\"\n\n self.render_positions(prog)\n\n # Restore cull face to back\n if self.backface_fragmap:\n ctx.cull_face = \"back\"\n\n def render_depth_prepass(self, camera, **kwargs):\n if not self.depth_prepass or self.depth_only_program is None:\n return\n\n prog = self.depth_only_program\n self.set_camera_matrices(prog, camera)\n self.render_positions(prog)\n\n def render_outline(self, ctx, camera):\n if self.outline and self.outline_program is not None:\n prog = self.outline_program\n self.set_camera_matrices(prog, camera)\n\n if self.backface_culling:\n ctx.enable(moderngl.CULL_FACE)\n else:\n ctx.disable(moderngl.CULL_FACE)\n self.render_positions(prog)\n\n # Render children node recursively.\n for n in self.nodes:\n n.render_outline(ctx, camera)\n\n def release(self):\n 
\"\"\"\n Release all OpenGL resources used by this node and any of its children. Subclasses that instantiate OpenGL\n objects should implement this method with '@hooked' to avoid leaking resources.\n \"\"\"\n for n in self.nodes:\n n.release()\n\n def on_selection(self, node, instance_id, tri_id):\n \"\"\"\n Called when the node is selected\n\n :param node: the node which was clicked (can be None if the selection wasn't a mouse event)\n :param instance_id: the id of the instance that was clicked, 0 if the object is not instanced\n (can be None if the selection wasn't a mouse event)\n :param tri_id: the id of the triangle that was clicked from the 'node' mesh\n (can be None if the selection wasn't a mouse event)\n \"\"\"\n pass\n\n def key_event(self, key, wnd_keys):\n \"\"\"\n Handle shortcut key presses (if you are the selected object)\n \"\"\"\n pass\n\n def update_frames(self, *args, **kwargs):\n pass\n\n def add_frames(self, *args, **kwargs):\n pass\n\n def remove_frames(self, *args, **kwargs):\n pass\n\n def _export_usd_recursively(self, stage, usd_path, directory, verbose):\n if verbose:\n print(usd_path)\n for n in self.nodes:\n if n.export_usd_enabled:\n n.export_usd(stage, usd_path, directory, verbose)\n\n def export_usd(self, stage, usd_path: str, directory: str = None, verbose=False):\n \"\"\"\n Export the node into an USD file. Nodes that implement this method should use\n recursively call this for every children that should also be exported.\n\n :param stage: an object of type Usd.Stage into which to export the node\n :param usd_path: the path of the parent object in the USD file scene hierarchy.\n \"\"\"\n from pxr import Gf, UsdGeom\n\n usd_path = f\"{usd_path}/{self.name.replace(' ', '_')}_{self.uid:03}\"\n\n # Transform.\n xform = UsdGeom.Xform.Define(stage, usd_path)\n a_xform = xform.AddTransformOp()\n a_xform.Set(Gf.Matrix4d(self.get_local_transform().astype(np.float64).T))\n\n self._export_usd_recursively(stage, usd_path, directory, verbose)" }, { "identifier": "get_screen_texture_program", "path": "aitviewer/shaders.py", "snippet": "@functools.lru_cache()\ndef get_screen_texture_program():\n return load_program(\"screen_texture.glsl\")" }, { "identifier": "Streamable", "path": "aitviewer/streamables/streamable.py", "snippet": "class Streamable(Node):\n \"\"\"Interface for renderables.\"\"\"\n\n def __init__(self, **kwargs):\n super(Streamable, self).__init__(**kwargs)\n\n self.is_recording = False\n\n def start(self):\n pass\n\n def stop(self):\n pass\n\n def capture(self):\n \"\"\"Capture from the sensor\"\"\"\n raise NotImplementedError(\"Must be implemented by the subclass.\")\n\n def record_start(self):\n self.is_recording = True\n\n def record_capture(self):\n pass\n\n def record_finish(self):\n self.is_recording = False\n return []" } ]
import cv2
import numpy as np
from moderngl_window import geometry
from aitviewer.scene.node import Node
from aitviewer.shaders import get_screen_texture_program
from aitviewer.streamables.streamable import Streamable
7,350
# Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos class Webcam(Streamable): """ Renders webcam stream to a quad. Quad is positioned in screen coordinates. """ def __init__( self, src=0, size=(2.0, 2.0), pos=(0.0, 0.0), transparency=1.0, icon="\u0088", **kwargs, ): """ :param src: integer denotes device source id, i.e. webcam 0. """ super(Webcam, self).__init__(icon=icon, **kwargs) # Capture source self._cap = None # Set after cv reads video file or webcam self.width = None self.height = None self.pos = pos self.size = size self.fps = None self.frame_count = None self.transparency = transparency self.src = src # Render into a quad in screen space (z=0) self._texture = None @Node.once def make_renderable(self, ctx): self.ctx = ctx self.quad = geometry.quad_2d( pos=self.pos, size=self.size, normals=False ) # (2,2) is Full Screen i.e. -1 to 1 in x/y
# Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos class Webcam(Streamable): """ Renders webcam stream to a quad. Quad is positioned in screen coordinates. """ def __init__( self, src=0, size=(2.0, 2.0), pos=(0.0, 0.0), transparency=1.0, icon="\u0088", **kwargs, ): """ :param src: integer denotes device source id, i.e. webcam 0. """ super(Webcam, self).__init__(icon=icon, **kwargs) # Capture source self._cap = None # Set after cv reads video file or webcam self.width = None self.height = None self.pos = pos self.size = size self.fps = None self.frame_count = None self.transparency = transparency self.src = src # Render into a quad in screen space (z=0) self._texture = None @Node.once def make_renderable(self, ctx): self.ctx = ctx self.quad = geometry.quad_2d( pos=self.pos, size=self.size, normals=False ) # (2,2) is Full Screen i.e. -1 to 1 in x/y
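The `Webcam` code above wraps an OpenCV capture source and draws each frame onto a screen-space quad. As an illustration of the capture side only (no OpenGL), the sketch below opens a device with `cv2.VideoCapture`, reads a single frame, and converts it from OpenCV's BGR layout to RGB, which is the form a texture upload would typically expect; it is a hedged sketch, not the aitviewer implementation.

import cv2

def grab_rgb_frame(src: int = 0):
    """Open a capture device, read one frame, and return it as an RGB array (or None)."""
    cap = cv2.VideoCapture(src)
    if not cap.isOpened():
        return None
    ok, frame_bgr = cap.read()  # frame_bgr is HxWx3, uint8, in OpenCV's BGR channel order
    cap.release()
    if not ok:
        return None
    return cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)

frame = grab_rgb_frame(0)
print(None if frame is None else frame.shape)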
self.prog = get_screen_texture_program()
1
2023-12-07 16:13:50+00:00
12k
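In the record above, `cropped_code` stops right after the quad is created, `next_line` holds the gold continuation (`self.prog = get_screen_texture_program()`), and `gold_snippet_index` is 1, which is consistent with a 0-based index into `context` pointing at the `get_screen_texture_program` snippet. The sketch below shows one plausible way to score a model prediction against such a record with a whitespace-normalized exact match; this is an illustrative assumption, not necessarily the benchmark's official metric.

def normalize(line: str) -> str:
    # Collapse runs of whitespace and trim the ends so formatting differences don't count.
    return " ".join(line.split())

def exact_match(prediction: str, gold_next_line: str) -> bool:
    return normalize(prediction) == normalize(gold_next_line)

gold_next_line = "self.prog = get_screen_texture_program()"
print(exact_match("self.prog  =  get_screen_texture_program()", gold_next_line))  # True
print(exact_match("self.prog = None", gold_next_line))                            # False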
nexB/dejacode
workflow/models.py
[ { "identifier": "LastModifiedByField", "path": "dje/fields.py", "snippet": "class LastModifiedByField(models.ForeignKey):\n def __init__(self, *args, **kwargs):\n help_text = _(\"The application user who last modified the object.\")\n\n kwargs.update(\n {\n \"to\": settings.AUTH_USER_MODEL,\n \"on_delete\": kwargs.get(\"on_delete\", models.PROTECT),\n \"related_name\": kwargs.get(\"related_name\", \"modified_%(class)ss\"),\n \"null\": True,\n \"editable\": False,\n \"serialize\": False,\n \"help_text\": kwargs.get(\"help_text\", help_text),\n }\n )\n super().__init__(*args, **kwargs)" }, { "identifier": "DataspacedManager", "path": "dje/models.py", "snippet": "class DataspacedManager(models.Manager.from_queryset(DataspacedQuerySet)):\n def get_queryset(self):\n return super().get_queryset().select_related(\"dataspace\")" }, { "identifier": "DataspacedModel", "path": "dje/models.py", "snippet": "class DataspacedModel(models.Model):\n \"\"\"Abstract base model for all models that are keyed by Dataspace.\"\"\"\n\n dataspace = models.ForeignKey(\n to=\"dje.Dataspace\",\n on_delete=models.PROTECT,\n editable=False,\n help_text=DATASPACE_FIELD_HELP_TEXT,\n )\n\n # This field does not have unique=True because subclasses of\n # ``DataspacedModel`` should declare a unique_together meta option\n # for ``dataspace`` and ``uuid``. Objects that inherit from\n # ``DataspacedModel`` and that are copied between dataspaces will\n # have the same uuid. This means that an object's universally unique\n # identifier (uuid) may *not* be universally unique to a database row.\n uuid = models.UUIDField(\n _(\"UUID\"),\n default=uuid.uuid4,\n editable=False,\n )\n\n # From: https://docs.djangoproject.com/en/dev/topics/db/managers/\n # \"Managers from abstract base classes are always inherited by the child\n # class, [...]. Abstract base classes are designed to capture information\n # and behavior that is common to their child classes. 
Defining common\n # managers is an appropriate part of this common information.\"\n # As a result, all DataspacedModel models will inherit from the\n # appropriate DataspacedManager by default.\n # When the `objects` attribute is overridden in the child class, we enforce\n # that the Manager class defined is a child of the DataspacedManager\n # using the Django \"System check framework\".\n objects = DataspacedManager()\n\n def get_dataspace(self):\n return self.dataspace\n\n get_dataspace.short_description = _(\"Dataspace\")\n get_dataspace.admin_order_field = \"dataspace\"\n\n class Meta:\n abstract = True\n\n def natural_key(self):\n return self.dataspace.name, self.uuid\n\n @classmethod\n def check(cls, **kwargs):\n \"\"\"\n Enforce the usage of DataspacedManager (or child class) as the\n default Manager using the Django \"System check framework\".\n Note that Manager generated from a subclass of DataspacedQuerySet are valid:\n Manager(models.Manager.from_queryset(DataspacedQuerySet))\n \"\"\"\n errors = super().check(**kwargs)\n enforced_manager = DataspacedManager\n enforced_queryset = DataspacedQuerySet\n\n has_valid_manager = any(\n [\n isinstance(cls._default_manager, enforced_manager),\n issubclass(cls._default_manager._queryset_class, enforced_queryset),\n ]\n )\n\n if not has_valid_manager:\n manager_name = enforced_manager.__name__\n errors.append(\n checks.Error(\n f\"Manager is not a subclass of {manager_name}\",\n hint=f\"Set the proper {manager_name} Manager\",\n obj=cls,\n )\n )\n\n if cls._meta.managed and not cls._meta.unique_together:\n errors.append(\n checks.Error(\n \"`unique_together` must be defined on DataspacedModel.\",\n hint=\"Add a value for unique_together on this model Meta class.\",\n obj=cls,\n )\n )\n\n return errors\n\n def save(self, *args, **kwargs):\n \"\"\"Enforces related object to share the same Dataspace as self.\"\"\"\n # A `copy` argument is provided when calling save() from the copy.\n # It needs to be poped before calling the super().save()\n kwargs.pop(\"copy\", None)\n\n # For these model classes, related objects can still be saved even if\n # they have a dataspace which is not the current one.\n allowed_models = [Dataspace, get_user_model(), ContentType]\n\n for field in self.local_foreign_fields:\n if field.related_model not in allowed_models:\n attr_value = getattr(self, field.name)\n if attr_value and attr_value.dataspace != self.dataspace:\n raise ValueError(\n f'The Dataspace of the related object: \"{attr_value}\" '\n f'is not \"{self.dataspace}\"'\n )\n\n self.clean_extra_spaces_in_identifier_fields()\n super().save(*args, **kwargs)\n\n @classmethod\n def model_fields(cls):\n \"\"\"Return the list of fields name available on this model.\"\"\"\n return [field.name for field in cls._meta.get_fields()]\n\n @classmethod\n def create_from_data(cls, user, data, validate=False):\n \"\"\"\n Create and Return an instance of this `cls` using the provided `data`.\n The instance is created in the provided `user` Dataspace.\n\n If `validate` is enabled, the data with be validated using the `full_clean`\n method before attempting the `save`. 
This has the benefit of catching\n data issues and returning those as `ValidationError` instead of `DatabaseError`\n at save time, that will have an impact in the database transaction management.\n \"\"\"\n model_fields = cls.model_fields()\n cleaned_data = {\n field_name: value for field_name, value in data.items() if field_name in model_fields\n }\n\n instance = cls(\n dataspace=user.dataspace,\n created_by=user,\n **cleaned_data,\n )\n\n if validate:\n instance.full_clean()\n instance.save()\n\n return instance\n\n def update_from_data(self, user, data, override=False):\n \"\"\"\n Update this object instance with the provided `data`.\n The `save()` method is called only if at least one field was modified.\n \"\"\"\n model_fields = self.model_fields()\n updated_fields = []\n\n for field_name, value in data.items():\n if value in EMPTY_VALUES or field_name not in model_fields:\n continue\n\n current_value = getattr(self, field_name, None)\n if not current_value or (current_value != value and override):\n setattr(self, field_name, value)\n updated_fields.append(field_name)\n\n if updated_fields:\n self.last_modified_by = user\n self.save()\n\n return updated_fields\n\n def as_json(self):\n try:\n serialized_data = serialize(\n \"json\",\n [self],\n use_natural_foreign_keys=True,\n use_natural_primary_keys=True,\n )\n except SerializationError:\n serialized_data = None\n\n return serialized_data\n\n def get_verbose_name(self):\n return self._meta.verbose_name\n\n def get_url(self, name, params):\n opts = self._meta\n viewname = f\"{opts.app_label}:{opts.model_name}_{name}\"\n return reverse(viewname, args=params)\n\n def get_admin_url(self):\n opts = self._meta\n viewname = f\"admin:{opts.app_label}_{opts.model_name}_change\"\n try:\n url = reverse(viewname, args=[self.pk])\n except NoReverseMatch:\n return\n return url\n\n def get_change_url(self):\n \"\"\"\n Return the admin URL by default.\n Override this method if the object has a custom change view.\n \"\"\"\n return self.get_admin_url()\n\n def get_admin_action_url(self, name):\n opts = self._meta\n try:\n url = reverse(f\"admin:{opts.app_label}_{opts.model_name}_{name}\")\n except NoReverseMatch:\n return\n return f\"{url}?ids={self.pk}\"\n\n def get_copy_url(self):\n return self.get_admin_action_url(\"copy\")\n\n def get_api_copy_to_my_dataspace_url(self):\n model_name = self._meta.model_name\n return reverse(f\"api_v2:{model_name}-copy-to-my-dataspace\", args=[self.uuid])\n\n def get_compare_url(self):\n return self.get_admin_action_url(\"compare\")\n\n def get_html_link(self, href, **attrs):\n \"\"\"\n Return a HTML link using the given href and __str__ of the object\n as value.\n\n Anything given as kwargs will be added as attributes on the anchor.\n instance.get_html_link('a_href', target='_blank', title='Title')\n\n A dict can be also be given like this:\n attributes = **{'target': '_blank', 'title': 'Title'}\n\n A special 'field_name' attribute can be used to replace the __str__ value\n by the given model field value of the instance.\n \"\"\"\n value = attrs.pop(\"value\", None)\n if not value:\n field_name = attrs.pop(\"field_name\", None)\n value = getattr(self, field_name) if field_name else self\n\n final_attrs = {\"href\": smart_urlquote(href)}\n if attrs is not None:\n final_attrs.update(attrs)\n\n return format_html(\"<a{}>{}</a>\", flatatt(final_attrs), value)\n\n def get_admin_link(self, **attrs):\n \"\"\"Return a HTML link using the get_admin_url() as href.\"\"\"\n admin_url = self.get_admin_url()\n if admin_url:\n 
return self.get_html_link(self.get_admin_url(), **attrs)\n\n def get_absolute_link(self, **attrs):\n \"\"\"Return a HTML link using the get_absolute_url() as href.\"\"\"\n if hasattr(self, \"get_absolute_url\"):\n return self.get_html_link(self.get_absolute_url(), **attrs)\n\n @property\n def urn_link(self):\n \"\"\"Require the `urn` property to be implemented on the Model.\"\"\"\n urn = getattr(self, \"urn\", None)\n if urn:\n return format_html('<a href=\"{}\">{}</a>', reverse(\"urn_resolve\", args=[urn]), urn)\n\n def _get_local_foreign_fields(self):\n \"\"\"\n Return a list of ForeignKey type fields of the model.\n GenericForeignKey are not included, filtered out with field.concrete\n \"\"\"\n return [field for field in self._meta.get_fields() if field.many_to_one and field.concrete]\n\n local_foreign_fields = property(_get_local_foreign_fields)\n\n @classmethod\n def get_identifier_fields(cls):\n \"\"\"\n Return a list of the fields, based on the Meta unique_together, to be\n used to match a unique instance within a Dataspace.\n \"\"\"\n unique_fields = cls._meta.unique_together\n\n # Using only the first part of the declared unicity\n if type(unique_fields[0]) is tuple:\n unique_fields = unique_fields[0]\n\n return [str(field_name) for field_name in unique_fields if field_name != \"dataspace\"]\n\n def get_exclude_candidates_fields(self):\n \"\"\"\n Return the fields supported by the copy exclude feature.\n This exclude all the fields like the dataspace, id, uuid and\n field that do not accept a NULL value.\n \"\"\"\n from dje.copier import ALWAYS_EXCLUDE\n\n fields = []\n for field in self._meta.fields:\n skip_conditions = [\n field.related_model is Dataspace,\n isinstance(field, models.AutoField),\n isinstance(field, models.UUIDField),\n not field.null and not field.blank and not field.has_default(),\n field.name in ALWAYS_EXCLUDE,\n ]\n\n if not any(skip_conditions):\n fields.append(field)\n\n return fields\n\n @classmethod\n def get_exclude_choices(cls):\n return sorted(\n (\n (field.name, capfirst(field.verbose_name))\n for field in cls().get_exclude_candidates_fields()\n ),\n key=operator.itemgetter(1), # Sorts the choices by their verbose_name\n )\n\n def unique_filters_for(self, target):\n \"\"\"\n Return a dictionary of filters based on unicity constraints.\n (i.e. the Model Meta \"unique_together\" of the object.)\n\n The filters are used to \"match\" an existing entry in the\n \"target\" Dataspace.\n\n The result of the match is used to know if it's a copy or update case.\n Only the first field (or set of fields) declared in the unique_together\n is used as a unique_filters.\n\n This function is used during the Object \"copy\" and \"update\" to another\n Dataspace.\n \"\"\"\n unique_filters = {}\n\n for field_name in self.get_identifier_fields():\n field_instance = getattr(self, field_name)\n\n if isinstance(field_instance, DataspacedModel):\n # In this case, the current field_instance is a FK to another\n # DataspacedModel instance.\n # Trying to match the object in \"target\"...\n manager = field_instance.__class__.objects\n # ...with the UUID first\n result = manager.filter(uuid=self.uuid, dataspace=target)\n # ... 
with current unique_filters_for method if nothing matched\n if not result:\n filters = field_instance.unique_filters_for(target)\n result = manager.filter(**filters)\n\n if result:\n unique_filters.update({field_name: result[0]})\n else:\n unique_filters.update({field_name: None})\n else:\n unique_filters.update({field_name: field_instance})\n\n unique_filters.update({\"dataspace\": target})\n return unique_filters\n\n @staticmethod\n def get_extra_relational_fields():\n \"\"\"\n Return a list of related_name as declared on the \"Many\" part of the\n relation.\n Hook to explicitly declare the relational fields,\n like OneToMany and GenericForeignKey pointing to this Model.\n This is one by the object_copy feature.\n Default: '<fk_model_name>_set'\n \"\"\"\n return []\n\n def clean(self, from_api=False):\n if self.id: # Addition only\n return\n\n self.validate_case_insensitive_unique_on()\n self.validate_against_reference_data(from_api)\n\n def validate_case_insensitive_unique_on(self):\n \"\"\"\n Validate uniqueness via case-insensitive match, using the field\n set on this Model `case_insensitive_unique_on` property.\n The validation is only applied on Addition.\n \"\"\"\n errors = {}\n\n for field_name in getattr(self, \"case_insensitive_unique_on\", []):\n value = getattr(self, field_name, None)\n if not value:\n return\n\n msg = (\n 'The application object that you are creating already exists as \"{}\". '\n \"Note that a different case in the object name is not sufficient to \"\n \"make it unique.\"\n )\n\n qs = (\n self.__class__._default_manager.scope(self.dataspace)\n .filter(**{f\"{field_name}__iexact\": value})\n .exclude(**{f\"{field_name}__exact\": value})\n )\n\n if qs.exists():\n error = msg.format(getattr(qs.first(), field_name))\n errors.setdefault(field_name, []).append(error)\n\n if errors:\n raise ValidationError(errors)\n\n def validate_against_reference_data(self, from_api=False):\n \"\"\"\n Validate values set on a non-reference dataspace instance against reference data.\n\n Inspired by django.db.models.Model._perform_unique_checks()\n \"\"\"\n LIMITED_TO_MODELS = [\n \"Owner\",\n \"License\",\n \"LicenseCategory\",\n \"LicenseProfile\",\n \"LicenseStatus\",\n \"LicenseStyle\",\n \"LicenseTag\",\n \"Component\",\n \"ComponentKeyword\",\n \"ComponentStatus\",\n \"ComponentType\",\n \"Package\",\n ]\n\n if self.__class__.__name__ not in LIMITED_TO_MODELS:\n return\n\n reference_dataspace = Dataspace.objects.get_reference()\n dataspace = getattr(self, \"dataspace\", None)\n run_validation = all(\n [\n dataspace,\n reference_dataspace,\n dataspace != reference_dataspace,\n ]\n )\n if not run_validation:\n return\n\n or_queries = []\n involved_lookup_fields = []\n uniques_lookups = [fields for fields in self._meta.unique_together if \"uuid\" not in fields]\n\n for fields in uniques_lookups:\n lookup_kwargs = {}\n for field_name in fields:\n lookup_value = None\n if field_name != \"dataspace\":\n lookup_value = getattr(self, field_name, None)\n if lookup_value is None:\n continue\n lookup_kwargs[str(field_name)] = lookup_value\n involved_lookup_fields.append(field_name)\n\n if lookup_kwargs:\n or_queries.append(models.Q(**lookup_kwargs))\n\n if not or_queries:\n return\n\n qs = self.__class__._default_manager.filter(reduce(operator.or_, or_queries))\n\n if qs.scope(self.dataspace).exists():\n return # Skip validation if the object already exists in my own Dataspace\n\n if qs.scope(reference_dataspace).exists():\n reference_object = qs.first()\n msg = (\n \"The application 
object that you are creating already exists as {} \"\n \"in the reference dataspace.\"\n )\n\n if not from_api:\n copy_link = self.get_html_link(\n reference_object.get_copy_url(),\n value=_(\"Copy to my Dataspace\"),\n target=\"_blank\",\n )\n msg += f\" {copy_link}\"\n if hasattr(reference_object, \"get_absolute_url\"):\n reference_object = reference_object.get_absolute_link(target=\"_blank\")\n else:\n copy_link = reference_object.get_api_copy_to_my_dataspace_url()\n msg += (\n f\" Use the following URL to copy the reference object to your \"\n f\"local Dataspace: {copy_link}\"\n )\n\n error = format_html(msg, reference_object)\n\n if from_api:\n errors = {\n \"error\": error,\n \"copy_url\": copy_link,\n }\n else:\n errors = {field: error for field in involved_lookup_fields}\n\n raise ValidationError(errors)\n\n def clean_extra_spaces_in_identifier_fields(self):\n \"\"\"Remove extra spaces in identifier fields value.\"\"\"\n for field_name in self.get_identifier_fields():\n field_instance = self._meta.get_field(field_name)\n if isinstance(field_instance, models.CharField):\n field_value = getattr(self, field_name, \"\")\n if \" \" in field_value:\n setattr(self, field_name, \" \".join(field_value.split()))\n\n def mark_all_notifications_as_read(self, user):\n unread_notifications = Notification.objects.unread().filter(\n action_object_content_type__model=self._meta.model_name,\n action_object_object_id=self.id,\n recipient=user,\n )\n if unread_notifications:\n unread_notifications.update(unread=False)" }, { "identifier": "DataspacedQuerySet", "path": "dje/models.py", "snippet": "class DataspacedQuerySet(models.QuerySet):\n \"\"\"\n QuerySet for the DataspacedModel to be used on the Models as\n DataspacedManager (using Manager.from_queryset)\n\n Provide filters related to the Dataspace system.\n \"\"\"\n\n def get_by_natural_key(self, dataspace_name, uuid):\n return self.get(dataspace__name=dataspace_name, uuid=uuid)\n\n def scope(self, dataspace, include_reference=False):\n \"\"\"\n Limit the QuerySet results to the provided `dataspace`.\n The reference Dataspace results can be included using the\n `include_reference` argument.\n When a string is provided for `dataspace` in place of a Dataspace\n instance, the `scope_by_name` method will be called.\n \"\"\"\n if type(dataspace) is str:\n return self.scope_by_name(dataspace_name=dataspace)\n\n dataspaces = {dataspace}\n if include_reference:\n reference = Dataspace.objects.get_reference()\n if reference:\n dataspaces.add(reference)\n\n return self.filter(dataspace__in=dataspaces)\n\n def scope_by_name(self, dataspace_name):\n return self.filter(dataspace__name=dataspace_name)\n\n def scope_by_id(self, dataspace_id):\n return self.filter(dataspace__id=dataspace_id)\n\n def scope_for_user(self, user):\n return self.filter(dataspace=user.dataspace)\n\n def scope_for_user_in_admin(self, user):\n # Used in DataspacedAdmin.get_queryset()\n if user.dataspace.is_reference:\n return self # no filtering\n return self.scope(user.dataspace, include_reference=True)\n\n def get_or_none(self, *args, **kwargs):\n \"\"\"Return a single object matching the given keyword arguments, `None` otherwise.\"\"\"\n with suppress(self.model.DoesNotExist, ValidationError):\n return self.get(*args, **kwargs)\n\n def group_by(self, field_name):\n \"\"\"Return a dict of QS instances grouped by the given `field_name`.\"\"\"\n # Not using a dict comprehension to support QS without `.order_by(field_name)`.\n grouped = defaultdict(list)\n\n for field_value, group in 
groupby(self, attrgetter(field_name)):\n grouped[field_value].extend(list(group))\n\n return dict(grouped)" }, { "identifier": "HistoryDateFieldsMixin", "path": "dje/models.py", "snippet": "class HistoryDateFieldsMixin(models.Model):\n created_date = models.DateTimeField(\n auto_now_add=True, # Automatically set to now on object creation\n db_index=True,\n help_text=_(\"The date and time the object was created.\"),\n )\n\n last_modified_date = models.DateTimeField(\n auto_now=True, # Automatically set to now on object save()\n db_index=True,\n help_text=_(\"The date and time the object was last modified.\"),\n )\n\n class Meta:\n abstract = True" }, { "identifier": "HistoryFieldsMixin", "path": "dje/models.py", "snippet": "class HistoryFieldsMixin(HistoryUserFieldsMixin, HistoryDateFieldsMixin):\n \"\"\"Add the created_date, last_modified_date, created_by, last_modified_by fields.\"\"\"\n\n class Meta:\n abstract = True" }, { "identifier": "get_unsecured_manager", "path": "dje/models.py", "snippet": "def get_unsecured_manager(model_class):\n \"\"\"\n Return the `unsecured_objects` manager if the default one `is_secured'.\n WARNING: This is only to be used in places where a User context is not available for\n the secured manager, like management commands.\n \"\"\"\n manager = model_class._default_manager\n if is_secured(manager):\n manager = model_class.unsecured_objects\n return manager" }, { "identifier": "request_comment_slack_payload", "path": "workflow/notification.py", "snippet": "def request_comment_slack_payload(comment):\n req = comment.request\n site_url = settings.SITE_URL.rstrip(\"/\")\n\n pretext = (\n f\"[DejaCode/{req.dataspace.name}] New comment by {comment.user} on Request \"\n f\"<{site_url}{req.get_absolute_url()}|#{req.id} {req.title}> \"\n f\"(assigned to {req.assignee})\"\n )\n\n # https://api.slack.com/docs/messages/builder\n return {\n \"attachments\": [\n {\n \"fallback\": pretext,\n \"pretext\": pretext,\n \"color\": \"#ff9d2e\",\n \"text\": comment.text,\n \"ts\": f\"{comment.last_modified_date.timestamp()}\",\n }\n ]\n }" }, { "identifier": "request_slack_payload", "path": "workflow/notification.py", "snippet": "def request_slack_payload(req, created):\n color = \"#5bb75b\" if created else \"#ff9d2e\"\n action = \"created\" if created else \"updated\"\n user = req.requester if created else req.last_modified_by\n site_url = settings.SITE_URL.rstrip(\"/\")\n\n def make_field_dict(title, value):\n return {\"title\": title, \"value\": value, \"short\": True}\n\n fields = [\n make_field_dict(\"Assigned to\", f\"{req.assignee}\"),\n make_field_dict(\"Status\", f\"{req.get_status_display()}\"),\n ]\n if req.priority:\n fields.append(make_field_dict(\"Priority\", f\"{req.priority}\"))\n if req.product_context:\n fields.append(make_field_dict(\"Product context\", f\"{req.product_context}\"))\n if req.content_object:\n content_object_link = (\n f\"<{site_url}{req.content_object.get_absolute_url()}|{req.content_object}>\"\n )\n fields.append(make_field_dict(\"Applies to\", content_object_link))\n\n # https://api.slack.com/docs/messages/builder\n return {\n \"attachments\": [\n {\n \"fallback\": f\"#{req.id} {req.title} {action} by {user}\",\n \"pretext\": f\"[DejaCode/{req.dataspace.name}] Request {action} by {user}\",\n \"color\": color,\n \"title\": f\"#{req.id} {req.title}\",\n \"title_link\": f\"{site_url}{req.get_absolute_url()}\",\n \"text\": f\"{req.request_template.name}\",\n \"fields\": fields,\n \"ts\": f\"{req.last_modified_date.timestamp()}\",\n }\n ]\n }" } ]
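The `DataspacedQuerySet.group_by` snippet above groups rows by a field with `itertools.groupby` and accumulates into a `defaultdict(list)`, which is what lets it cope with input that is not ordered by that field (plain `groupby` only merges consecutive equal keys). Below is a minimal standalone sketch of the same idea on ordinary objects, assuming nothing Django-specific.

from collections import defaultdict
from dataclasses import dataclass
from itertools import groupby
from operator import attrgetter

@dataclass
class Row:
    dataspace: str
    name: str

def group_by(rows, field_name):
    # extend() (rather than a dict comprehension) merges the multiple runs that
    # groupby produces when the input is not sorted by field_name.
    grouped = defaultdict(list)
    for value, group in groupby(rows, attrgetter(field_name)):
        grouped[value].extend(group)
    return dict(grouped)

rows = [Row("nexB", "a"), Row("public", "b"), Row("nexB", "c")]  # not sorted by dataspace
print({k: [r.name for r in v] for k, v in group_by(rows, "dataspace").items()})
# {'nexB': ['a', 'c'], 'public': ['b']}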
import json
import logging
import os
import markdown
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.fields import ArrayField
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models import Q
from django.dispatch import receiver
from django.urls import reverse
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.html import escape
from django.utils.html import format_html
from django.utils.translation import gettext_lazy as _
from bleach import Cleaner
from bleach.linkifier import LinkifyFilter
from bleach_allowlist import markdown_attrs
from bleach_allowlist import markdown_tags
from dje.fields import LastModifiedByField
from dje.models import DataspacedManager
from dje.models import DataspacedModel
from dje.models import DataspacedQuerySet
from dje.models import HistoryDateFieldsMixin
from dje.models import HistoryFieldsMixin
from dje.models import get_unsecured_manager
from workflow.notification import request_comment_slack_payload
from workflow.notification import request_slack_payload
from workflow.api import RequestSerializer
from workflow.api import RequestCommentSerializer
8,196
def __str__(self): return self.name def get_absolute_url(self): return reverse("workflow:request_add", args=[self.uuid]) @staticmethod def get_extra_relational_fields(): return ["questions"] def create_request(self, **kwargs): if "assignee" not in kwargs and self.default_assignee: kwargs["assignee"] = self.default_assignee return Request.objects.create( request_template=self, content_type=self.content_type, dataspace=self.dataspace, **kwargs, ) class Question(DataspacedModel): """ Represent one field of a RequestTemplate Form. WARNING: Modifying the schema of this model will require data migration (next to the usual schema migration). """ template = models.ForeignKey( to="workflow.RequestTemplate", on_delete=models.CASCADE, related_name="questions", ) label = models.CharField( max_length=255, help_text=_("Label for the form input."), ) help_text = models.TextField( blank=True, help_text=_( "Descriptive text (instructions) to display to the Requestor below the " "question." ), ) # (django.forms.fields.Field class, description) INPUT_TYPE_CHOICES = ( ("CharField", _("Text")), ("TextField", _("Paragraph text")), ("BooleanField", _("Yes/No")), ("DateField", _("Date")), ) input_type = models.CharField( max_length=30, choices=INPUT_TYPE_CHOICES, ) is_required = models.BooleanField( default=False, help_text=_("Indicate if the requestor must enter a value in the answer"), ) position = models.PositiveSmallIntegerField() class Meta: ordering = ["position"] unique_together = ("dataspace", "uuid") def __str__(self): return self.label class RequestMixin(models.Model): """Provide fields and methods for Request related models.""" request_count = models.PositiveSmallIntegerField( blank=True, null=True, ) class Meta: abstract = True def get_requests(self, user): """ We could use django.contrib.contenttypes.fields.GenericRelation instead but we don't want to avoid the cascade-deletion behavior. Private requests are included in the QuerySet but their content is not displayed. """ return Request.objects.for_activity_tab(self, user) def count_requests(self): """ Return the count of Request objects attached to this instance. Bypass the Product secured system since we need the proper count but do not have a user to provide. """ return Request.objects.for_content_object(self).count() def update_request_count(self): """ Update the `request_count` field on the instance. Using update() rather than save() to avoid noise in the history. The update is only applied if the current stored count is not the true database count. Return True if the request_count was updated. """ model_class = self.__class__ # We should have default=0 on the `request_count` field instead strored_count = self.request_count or 0 true_count = self.count_requests() if strored_count != true_count: # Use the unsecured_manager to bypass the security system and get the proper count
# # Copyright (c) nexB Inc. and others. All rights reserved. # DejaCode is a trademark of nexB Inc. # SPDX-License-Identifier: AGPL-3.0-only # See https://github.com/nexB/dejacode for support or download. # See https://aboutcode.org for more information about AboutCode FOSS projects. # logger = logging.getLogger("dje") # Add `RequestMixin` to the following Model classes. # Also add a `display_name` on the Model API Serializer. CONTENT_TYPES = ( models.Q(app_label="component_catalog", model="component") | models.Q(app_label="component_catalog", model="package") | models.Q(app_label="license_library", model="license") | models.Q(app_label="product_portfolio", model="product") ) class Priority(DataspacedModel): label = models.CharField( max_length=50, help_text=_("Concise name to identify the Priority."), ) position = models.PositiveSmallIntegerField( null=True, blank=True, db_index=True, help_text=_( "A number to control the sequence of the Priorities presented to " "the user when selecting one from the dropdown list." ), ) color_code = models.CharField( max_length=7, blank=True, help_text=_( "You can specify a valid HTML color code (e.g. #FFFFFF) to apply " "to your Priority." ), ) class Meta: unique_together = (("dataspace", "label"), ("dataspace", "uuid")) ordering = ["position", "label"] verbose_name_plural = _("priorities") def __str__(self): return self.label class RequestQuerySet(DataspacedQuerySet): BASE_SELECT_RELATED = [ "request_template", "requester", "assignee", "priority", "product_context", "last_modified_by", ] def product_secured(self, user): if not user: return self.none() product_ct = ContentType.objects.get_by_natural_key("product_portfolio", "product") product_qs = product_ct.model_class().objects.get_queryset(user=user) return ( self.scope(user.dataspace) .filter( # If a product_context is set, Limit to authorized Products Q(product_context__isnull=True) | Q(product_context__in=product_qs), ) .exclude( # If a Product type content_object is set, excludes non-authorized Products Q(content_type=product_ct) & Q(object_id__isnull=False) & ~Q(object_id__in=product_qs), ) ) def unassigned(self): """Limit the QuerySet to unassigned Requests.""" return self.filter(assignee__isnull=True) def assigned_to(self, user): """Limit the QuerySet to Requests assigned to the given user.""" return self.filter(assignee=user) def created_by(self, user): """Limit the QuerySet to Requests created by the given user.""" return self.filter(requester=user) def followed_by(self, user): """ Limit the QuerySet to Requests followed by the given user: requester, assignee, commented or attached a file. 
""" return self.filter( Q(requester=user) | Q(assignee=user) | Q(comments__user=user) | Q(attachments__uploader=user) ) def open(self): return self.filter(status=Request.Status.OPEN) def closed(self): return self.filter(status=Request.Status.CLOSED) def with_comments_attachments_counts(self): return self.annotate( attachments_count=models.Count("attachments", distinct=True), comments_count=models.Count("comments", distinct=True), ) def for_list_view(self, user): return ( self.product_secured(user) .with_comments_attachments_counts() .select_related(*self.BASE_SELECT_RELATED) .prefetch_related( "content_object__dataspace", "product_context__dataspace", ) .distinct() ) def for_details_view(self, user): return ( self.product_secured(user) .select_related(*self.BASE_SELECT_RELATED) .prefetch_related( "attachments__uploader", "comments__user", ) ) def for_edit_view(self, user): return self.product_secured(user).select_related(*self.BASE_SELECT_RELATED) def for_content_object(self, content_object, user=None): """Limit the QuerySet to Requests attach to given `content_object`.""" base_qs = self.product_secured(user) if user else self return base_qs.filter( content_type=ContentType.objects.get_for_model(content_object), object_id=content_object.id, ) def for_activity_tab(self, content_object, user): return ( self.for_content_object(content_object, user) .with_comments_attachments_counts() .select_related(*self.BASE_SELECT_RELATED) ) class Request(HistoryDateFieldsMixin, DataspacedModel): request_template = models.ForeignKey( to="workflow.RequestTemplate", related_name="requests", on_delete=models.PROTECT, editable=False, ) class Status(models.TextChoices): OPEN = "open", _("Open") CLOSED = "closed", _("Closed") DRAFT = "draft", _("Draft") status = models.CharField( max_length=10, choices=Status.choices, default=Status.OPEN, db_index=True, help_text=_( 'Status of the request. "Draft" indicates that the request is not ' "yet ready for action, pending further details from the requestor. " '"Open" indicates that the assignee has not finished the requested ' "actions, and also that comments from all interested parties are " 'welcome. "Closed" indicates that no further actions or comments ' "are needed or expected." ), ) is_private = models.BooleanField( default=False, db_index=True, help_text=_( "When checked, the details of this request are visible only" " to the original requester and to request reviewers, and " "other users only see a limited summary. As an " "administrator, you can check or un-check this indicator to" " make a request private or public." ), ) notes = models.TextField( blank=True, help_text=_( "Notes from one or more request reviewers regarding " "research, issues, and conclusions related to the " "request." ), ) requester = models.ForeignKey( to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT, related_name="request_as_requester", editable=False, help_text=_("Creator of the request."), ) assignee = models.ForeignKey( to=settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, related_name="request_as_assignee", limit_choices_to={"is_staff": True, "is_active": True}, null=True, blank=True, help_text=_( "The application user currently assigned to review the " "request and take appropriate action." ), ) product_context = models.ForeignKey( to="product_portfolio.Product", on_delete=models.SET_NULL, null=True, blank=True, # Bypass the validation in ForeignKey.validate() # Required since we do not have control over the QuerySet in that method. 
parent_link=True, help_text=_("Identify the Product impacted by your Request."), ) serialized_data = models.TextField( blank=True, help_text=_( "Optional data provided by the User making the request. " "Can be used by an Admin to pre-fill a form. Stored as " "JSON format." ), ) content_type = models.ForeignKey( to=ContentType, on_delete=models.PROTECT, limit_choices_to=CONTENT_TYPES, help_text=_( "Stores the type of the object requested. Supported types " "are Component, Package, License and Product" ), ) object_id = models.PositiveIntegerField( null=True, blank=True, db_index=True, help_text=_( "ID of the object attached to this request. This is used " "in combination with the content_type for the " "content_object field." ), ) # No need to be explicit about the content_type abd object_id field names as # we are using the default ones. content_object = GenericForeignKey() content_object_repr = models.CharField( max_length=1000, blank=True, help_text=_( "String representation of the attached content_object if any. " "This is useful for search purposes and not intended for display." ), ) priority = models.ForeignKey( to="workflow.Priority", on_delete=models.PROTECT, null=True, blank=True, help_text=_( "The priority is intended to provide team members with a guideline " "for selecting and assigning requests for additional action, based on the " "criticality of the request." ), ) title = models.CharField( max_length=255, db_index=True, help_text=_("The Request Title is a concise statement of the Request purpose and content."), ) cc_emails = ArrayField( base_field=models.EmailField(), null=True, blank=True, help_text=_( "You can provide a comma-separated list of email addresses to publish email " "notifications to any users that should be aware of the progress of this request." ), ) last_modified_by = LastModifiedByField() objects = DataspacedManager.from_queryset(RequestQuerySet)() class Meta: ordering = ["-last_modified_date"] unique_together = ("dataspace", "uuid") def __str__(self): return f"#{self.pk}" def save(self, *args, **kwargs): """Add the `update_request_count` logic on the related `content_object`.""" self.content_type = self.request_template.content_type # Store the repr of the content_object for search purposes. if self.object_id: # Bypass the broken GenericForeignKey.__get__ introduced in # https://github.com/django/django/commit/cc4cb95 try: self.content_object = self.content_type.get_object_for_this_type( id=self.object_id, ) except ObjectDoesNotExist: pass else: self.content_object_repr = str(self.content_object) # `previous_object_id` logic is only required on edition. previous_object_id = None is_addition = self.pk if is_addition: previous_object_id = self.__class__.objects.get(pk=self.pk).object_id super().save(*args, **kwargs) # Need to be post-save so the current Request exists in the DB before the count() if self.content_object and not self.is_draft: self.content_object.update_request_count() # The `content_object` was changed or removed, we need to update the `request_count` # of the previous object instance too. Warning: The previous object may not exist anymore. 
if previous_object_id and previous_object_id != self.object_id: try: previous_object = self.content_type.get_object_for_this_type(id=previous_object_id) except ObjectDoesNotExist: return previous_object.update_request_count() def get_absolute_url(self): return reverse("workflow:request_details", args=[self.uuid]) @property def details_url(self): return self.get_absolute_url() def get_serialized_data(self): if not self.serialized_data: return {} try: serialized_data = json.loads(self.serialized_data) except (ValueError, TypeError): return {} if not isinstance(serialized_data, dict): return {} return serialized_data def get_serialized_data_as_list(self): """Return a python iterable from the serialized_data field.""" serialized_data = self.get_serialized_data() if not serialized_data: return [] return [ { "label": question.label, "input_type": question.input_type, "value": serialized_data.get(question.label), } for question in self.request_template.questions.all() ] def get_serialized_data_as_html(self, html_template="{label}: {value}", separator="<br>"): """Return a HTML content of the serialized_data.""" serialized_data = [] for data in self.get_serialized_data_as_list(): try: value = data["value"] if data["input_type"] == "BooleanField": value = "Yes" if bool(data.get("value")) == 1 else "No" line = str(html_template).format(label=data["label"], value=escape(value)) except KeyError: return 'Error in the "Serialized data" value.' else: serialized_data.append(line) return format_html(separator.join(serialized_data)) @property def serialized_data_html(self): return self.get_serialized_data_as_html() @property def is_open(self): return self.status == self.Status.OPEN @property def is_closed(self): return self.status == self.Status.CLOSED @property def is_draft(self): return self.status == self.Status.DRAFT def has_details_permission(self, user): """ Private Requests are not available to regular user unless he is the requester or is an administrator. """ return user == self.requester or user.is_staff or not self.is_private def has_edit_permission(self, user): """ Only the requester or an administrator can edit a Request, unless the Request is closed already. """ return (user == self.requester or user.is_staff) and not self.is_closed def has_close_permission(self, user): """Only the requester can close a Request if not closed already.""" return user == self.requester and not self.is_closed def get_involved_users(self, exclude=None): """ Return the set of users involved is the Requests: - requestor - assignee - edited by (multiple) - commented by (multiple) """ users = { self.requester, *(event.user for event in self.events.all()), *(comment.user for comment in self.comments.all()), } # The assignee is now required on the RequestForm but not on the Request model. # Keeping this condition for compatibility with old Request instances. if self.assignee: users.add(self.assignee) if exclude: users.discard(exclude) return users def serialize_hook(self, hook): if "hooks.slack.com" in hook.target: return request_slack_payload(self, created="added" in hook.event) serializer = RequestSerializer(self, context={"request": None}) return { "hook": hook.dict(), "data": serializer.data, } @receiver(models.signals.post_delete, sender=Request) def update_request_count_on_delete(sender, instance=None, **kwargs): """ Update the `request_count` on the content_object after deleting the Request instance. 
Using the `post_delete` signal instead of overriding the `delete()` method as it ensure this logic gets executed on bulk deletion as well. See https://docs.djangoproject.com/en/dev/topics/db/models/#overriding-predefined-model-methods """ if instance.object_id and instance.content_object: instance.content_object.update_request_count() class AbstractRequestEvent(HistoryDateFieldsMixin, DataspacedModel): user = models.ForeignKey( to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT, editable=False, ) text = models.TextField() class Meta: abstract = True class RequestEvent(AbstractRequestEvent): request = models.ForeignKey( to="workflow.Request", on_delete=models.CASCADE, related_name="events", ) EDIT = 1 ATTACHMENT = 2 CLOSED = 3 EVENT_TYPE_CHOICES = ( (EDIT, "Edition"), (ATTACHMENT, "Attachment"), (CLOSED, "Closed"), ) event_type = models.IntegerField( choices=EVENT_TYPE_CHOICES, ) class Meta: ordering = ["created_date"] unique_together = ("dataspace", "uuid") def __str__(self): return f"{self.get_event_type_display()} by {self.user.username}" class RequestComment(AbstractRequestEvent): request = models.ForeignKey( to="workflow.Request", on_delete=models.CASCADE, related_name="comments", ) class Meta: ordering = ["created_date"] unique_together = ("dataspace", "uuid") def __str__(self): return f"{self.user.username}: {self.text[:50]}..." def has_delete_permission(self, user): """ Only the Commenter or an administrator with the proper permissions can delete a Comment. """ return user == self.user or ( user.is_staff and user.has_perm("workflow.delete_requestcomment") ) def as_html(self): """ Convert user provided commented content into HTML using markdown. The URLs are converted into links using the bleach Linkify feature. The HTML code is sanitized using bleach to prevent XSS attacks. The clean needs to be applied to the Markdown’s output, not the input. See https://michelf.ca/blog/2010/markdown-and-xss/ for details. See also the chapter about safe mode in https://python-markdown.github.io/change_log/release-3.0/ """ unsafe_html = markdown.markdown( text=self.text, extensions=["markdown.extensions.nl2br"], ) # Using `Cleaner()` with the 1LinkifyFilter1 to clean and linkify in one pass. # See https://bleach.readthedocs.io/en/latest/linkify.html notes cleaner = Cleaner( tags=markdown_tags, attributes=markdown_attrs, filters=[LinkifyFilter], ) html = cleaner.clean(unsafe_html) return format_html(html) def serialize_hook(self, hook): if "hooks.slack.com" in hook.target: return request_comment_slack_payload(self) comment_serializer = RequestCommentSerializer(self, context={"request": None}) request_serializer = RequestSerializer(self.request, context={"request": None}) data = comment_serializer.data data["request"] = request_serializer.data return { "hook": hook.dict(), "data": data, } class RequestTemplateQuerySet(DataspacedQuerySet): def actives(self): return self.filter(is_active=True) def for_content_type(self, content_type): """ Return the active RequestTemplate instances within a given dataspace for a given model class using the content_type. """ return self.filter(content_type=content_type) class RequestTemplate(HistoryFieldsMixin, DataspacedModel): """ WARNING: Modifying the schema of this model will require data migration (next to the usual schema migration). 
""" name = models.CharField( max_length=100, help_text=_("Unique name of the template."), ) description = models.TextField( verbose_name=_("Request header text"), help_text=_( "Provide a title and/or general instructions to the Requestor about this " "Request form." ), ) content_type = models.ForeignKey( to=ContentType, on_delete=models.PROTECT, verbose_name=_("object type"), limit_choices_to=CONTENT_TYPES, help_text=_("You can define one Request Template for each application object."), ) is_active = models.BooleanField( default=False, db_index=True, help_text=_( "Enable this to set the current form active. " "Only one Form can be active per content type." ), ) include_applies_to = models.BooleanField( default=True, help_text=_( 'Enable this to present an "Applies to" field to a requester creating a ' "request based on this template, or anyone subsequently editing that request. " 'Disable it for a request that does not need an "Applies to" reference.' ), ) include_product = models.BooleanField( default=False, help_text=_( "Enable this to present a Product choice to a requester using this template. " "Disable it for a request that does not need a Product context." ), ) default_assignee = models.ForeignKey( to=settings.AUTH_USER_MODEL, limit_choices_to={"is_staff": True, "is_active": True}, on_delete=models.SET_NULL, null=True, blank=True, serialize=False, help_text=_( "Optionally specify the application user that should be the first to review " "a request using this template, and should receive an email when the request " "is submitted." ), ) objects = DataspacedManager.from_queryset(RequestTemplateQuerySet)() class Meta: unique_together = (("dataspace", "name"), ("dataspace", "uuid")) ordering = ["name"] def __str__(self): return self.name def get_absolute_url(self): return reverse("workflow:request_add", args=[self.uuid]) @staticmethod def get_extra_relational_fields(): return ["questions"] def create_request(self, **kwargs): if "assignee" not in kwargs and self.default_assignee: kwargs["assignee"] = self.default_assignee return Request.objects.create( request_template=self, content_type=self.content_type, dataspace=self.dataspace, **kwargs, ) class Question(DataspacedModel): """ Represent one field of a RequestTemplate Form. WARNING: Modifying the schema of this model will require data migration (next to the usual schema migration). """ template = models.ForeignKey( to="workflow.RequestTemplate", on_delete=models.CASCADE, related_name="questions", ) label = models.CharField( max_length=255, help_text=_("Label for the form input."), ) help_text = models.TextField( blank=True, help_text=_( "Descriptive text (instructions) to display to the Requestor below the " "question." 
), ) # (django.forms.fields.Field class, description) INPUT_TYPE_CHOICES = ( ("CharField", _("Text")), ("TextField", _("Paragraph text")), ("BooleanField", _("Yes/No")), ("DateField", _("Date")), ) input_type = models.CharField( max_length=30, choices=INPUT_TYPE_CHOICES, ) is_required = models.BooleanField( default=False, help_text=_("Indicate if the requestor must enter a value in the answer"), ) position = models.PositiveSmallIntegerField() class Meta: ordering = ["position"] unique_together = ("dataspace", "uuid") def __str__(self): return self.label class RequestMixin(models.Model): """Provide fields and methods for Request related models.""" request_count = models.PositiveSmallIntegerField( blank=True, null=True, ) class Meta: abstract = True def get_requests(self, user): """ We could use django.contrib.contenttypes.fields.GenericRelation instead but we don't want to avoid the cascade-deletion behavior. Private requests are included in the QuerySet but their content is not displayed. """ return Request.objects.for_activity_tab(self, user) def count_requests(self): """ Return the count of Request objects attached to this instance. Bypass the Product secured system since we need the proper count but do not have a user to provide. """ return Request.objects.for_content_object(self).count() def update_request_count(self): """ Update the `request_count` field on the instance. Using update() rather than save() to avoid noise in the history. The update is only applied if the current stored count is not the true database count. Return True if the request_count was updated. """ model_class = self.__class__ # We should have default=0 on the `request_count` field instead strored_count = self.request_count or 0 true_count = self.count_requests() if strored_count != true_count: # Use the unsecured_manager to bypass the security system and get the proper count
get_unsecured_manager(model_class).filter(pk=self.pk).update(request_count=true_count)
6
2023-12-07 16:57:42+00:00
12k
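For the dejacode record above, `gold_snippet_index` is 6; assuming it is a 0-based index into the record's `context` list (which the fields shown are consistent with), it picks out the `get_unsecured_manager` snippet, and the gold `next_line` indeed calls that helper. A small sketch of that lookup, with the identifier order copied from the record:

# Identifier order as it appears in the record's `context` list above (assumption:
# gold_snippet_index is a 0-based index into that list).
context_identifiers = [
    "LastModifiedByField", "DataspacedManager", "DataspacedModel",
    "DataspacedQuerySet", "HistoryDateFieldsMixin", "HistoryFieldsMixin",
    "get_unsecured_manager", "request_comment_slack_payload", "request_slack_payload",
]
gold_snippet_index = 6
next_line = ("get_unsecured_manager(model_class)"
             ".filter(pk=self.pk).update(request_count=true_count)")

gold_identifier = context_identifiers[gold_snippet_index]
print(gold_identifier)               # get_unsecured_manager
print(gold_identifier in next_line)  # True: the gold completion calls that helper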
wusize/CLIM
src/open_clip/eva_clip/model.py
[ { "identifier": "ModifiedResNet", "path": "src/open_clip/eva_clip/modified_resnet.py", "snippet": "class ModifiedResNet(nn.Module):\n \"\"\"\n A ResNet class that is similar to torchvision's but contains the following changes:\n - There are now 3 \"stem\" convolutions as opposed to 1, with an average pool instead of a max pool.\n - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1\n - The final pooling layer is a QKV attention instead of an average pool\n \"\"\"\n\n def __init__(self, layers, output_dim, heads, image_size=224, width=64):\n super().__init__()\n self.output_dim = output_dim\n self.image_size = image_size\n\n # the 3-layer stem\n self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(width // 2)\n self.act1 = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(width // 2)\n self.act2 = nn.ReLU(inplace=True)\n self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)\n self.bn3 = nn.BatchNorm2d(width)\n self.act3 = nn.ReLU(inplace=True)\n self.avgpool = nn.AvgPool2d(2)\n\n # residual layers\n self._inplanes = width # this is a *mutable* variable used during construction\n self.layer1 = self._make_layer(width, layers[0])\n self.layer2 = self._make_layer(width * 2, layers[1], stride=2)\n self.layer3 = self._make_layer(width * 4, layers[2], stride=2)\n self.layer4 = self._make_layer(width * 8, layers[3], stride=2)\n\n embed_dim = width * 32 # the ResNet feature dimension\n self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim)\n\n self.init_parameters()\n\n def _make_layer(self, planes, blocks, stride=1):\n layers = [Bottleneck(self._inplanes, planes, stride)]\n\n self._inplanes = planes * Bottleneck.expansion\n for _ in range(1, blocks):\n layers.append(Bottleneck(self._inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def init_parameters(self):\n if self.attnpool is not None:\n std = self.attnpool.c_proj.in_features ** -0.5\n nn.init.normal_(self.attnpool.q_proj.weight, std=std)\n nn.init.normal_(self.attnpool.k_proj.weight, std=std)\n nn.init.normal_(self.attnpool.v_proj.weight, std=std)\n nn.init.normal_(self.attnpool.c_proj.weight, std=std)\n\n for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]:\n for name, param in resnet_block.named_parameters():\n if name.endswith(\"bn3.weight\"):\n nn.init.zeros_(param)\n\n def lock(self, unlocked_groups=0, freeze_bn_stats=False):\n assert unlocked_groups == 0, 'partial locking not currently supported for this model'\n for param in self.parameters():\n param.requires_grad = False\n if freeze_bn_stats:\n freeze_batch_norm_2d(self)\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n # FIXME support for non-transformer\n pass\n\n def stem(self, x):\n x = self.act1(self.bn1(self.conv1(x)))\n x = self.act2(self.bn2(self.conv2(x)))\n x = self.act3(self.bn3(self.conv3(x)))\n x = self.avgpool(x)\n return x\n\n def forward(self, x):\n x = self.stem(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n x = self.attnpool(x)\n\n return x" }, { "identifier": "TimmModel", "path": "src/open_clip/eva_clip/timm_model.py", "snippet": "class TimmModel(nn.Module):\n \"\"\" timm model adapter\n # FIXME this adapter is a work in progress, may change in ways that break weight compat\n \"\"\"\n\n def __init__(\n self,\n 
model_name,\n embed_dim,\n image_size=224,\n pool='avg',\n proj='linear',\n proj_bias=False,\n drop=0.,\n pretrained=False):\n super().__init__()\n if timm is None:\n raise RuntimeError(\"Please `pip install timm` to use timm models.\")\n\n self.image_size = to_2tuple(image_size)\n self.trunk = timm.create_model(model_name, pretrained=pretrained)\n feat_size = self.trunk.default_cfg.get('pool_size', None)\n feature_ndim = 1 if not feat_size else 2\n if pool in ('abs_attn', 'rot_attn'):\n assert feature_ndim == 2\n # if attn pooling used, remove both classifier and default pool\n self.trunk.reset_classifier(0, global_pool='')\n else:\n # reset global pool if pool config set, otherwise leave as network default\n reset_kwargs = dict(global_pool=pool) if pool else {}\n self.trunk.reset_classifier(0, **reset_kwargs)\n prev_chs = self.trunk.num_features\n\n head_layers = OrderedDict()\n if pool == 'abs_attn':\n head_layers['pool'] = AbsAttentionPool2d(prev_chs, feat_size=feat_size, out_features=embed_dim)\n prev_chs = embed_dim\n elif pool == 'rot_attn':\n head_layers['pool'] = RotAttentionPool2d(prev_chs, out_features=embed_dim)\n prev_chs = embed_dim\n else:\n assert proj, 'projection layer needed if non-attention pooling is used.'\n\n # NOTE attention pool ends with a projection layer, so proj should usually be set to '' if such pooling is used\n if proj == 'linear':\n head_layers['drop'] = nn.Dropout(drop)\n head_layers['proj'] = nn.Linear(prev_chs, embed_dim, bias=proj_bias)\n elif proj == 'mlp':\n head_layers['mlp'] = Mlp(prev_chs, 2 * embed_dim, embed_dim, drop=drop, bias=(True, proj_bias))\n\n self.head = nn.Sequential(head_layers)\n\n def lock(self, unlocked_groups=0, freeze_bn_stats=False):\n \"\"\" lock modules\n Args:\n unlocked_groups (int): leave last n layer groups unlocked (default: 0)\n \"\"\"\n if not unlocked_groups:\n # lock full model\n for param in self.trunk.parameters():\n param.requires_grad = False\n if freeze_bn_stats:\n freeze_batch_norm_2d(self.trunk)\n else:\n # NOTE: partial freeze requires latest timm (master) branch and is subject to change\n try:\n # FIXME import here until API stable and in an official release\n from timm.models.helpers import group_parameters, group_modules\n except ImportError:\n raise RuntimeError(\n 'Please install latest timm `pip install git+https://github.com/rwightman/pytorch-image-models`')\n matcher = self.trunk.group_matcher()\n gparams = group_parameters(self.trunk, matcher)\n max_layer_id = max(gparams.keys())\n max_layer_id = max_layer_id - unlocked_groups\n for group_idx in range(max_layer_id + 1):\n group = gparams[group_idx]\n for param in group:\n self.trunk.get_parameter(param).requires_grad = False\n if freeze_bn_stats:\n gmodules = group_modules(self.trunk, matcher, reverse=True)\n gmodules = {k for k, v in gmodules.items() if v <= max_layer_id}\n freeze_batch_norm_2d(self.trunk, gmodules)\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n try:\n self.trunk.set_grad_checkpointing(enable)\n except Exception as e:\n logging.warning('grad checkpointing not supported for this timm image tower, continuing without...')\n\n def forward(self, x):\n x = self.trunk(x)\n x = self.head(x)\n return x" }, { "identifier": "EVAVisionTransformer", "path": "src/open_clip/eva_clip/eva_vit_model.py", "snippet": "class EVAVisionTransformer(nn.Module):\n \"\"\" Vision Transformer with support for patch or hybrid CNN input stage\n \"\"\"\n def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, 
embed_dim=768, depth=12,\n num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,\n drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, patch_dropout=0.,\n use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, rope=False,\n use_mean_pooling=True, init_scale=0.001, grad_checkpointing=False, xattn=False, postnorm=False,\n pt_hw_seq_len=16, intp_freq=False, naiveswiglu=False, subln=False):\n super().__init__()\n self.image_size = img_size\n self.num_heads = num_heads\n self.num_classes = num_classes\n self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models\n\n self.patch_embed = PatchEmbed(\n img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)\n num_patches = self.patch_embed.num_patches\n\n self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n if use_abs_pos_emb:\n self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))\n else:\n self.pos_embed = None\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n if use_shared_rel_pos_bias:\n self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads)\n else:\n self.rel_pos_bias = None\n\n if rope:\n half_head_dim = embed_dim // num_heads // 2\n hw_seq_len = img_size // patch_size\n self.rope = VisionRotaryEmbeddingFast(\n dim=half_head_dim,\n pt_seq_len=pt_hw_seq_len,\n ft_seq_len=hw_seq_len if intp_freq else None,\n # patch_dropout=patch_dropout\n )\n else: \n self.rope = None\n\n self.naiveswiglu = naiveswiglu\n\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule\n self.use_rel_pos_bias = use_rel_pos_bias\n self.blocks = nn.ModuleList([\n Block(\n dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,\n init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None,\n xattn=xattn, rope=self.rope, postnorm=postnorm, subln=subln, naiveswiglu=naiveswiglu)\n for i in range(depth)])\n self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim)\n self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None\n self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()\n\n if self.pos_embed is not None:\n trunc_normal_(self.pos_embed, std=.02)\n\n trunc_normal_(self.cls_token, std=.02)\n # trunc_normal_(self.mask_token, std=.02)\n\n self.apply(self._init_weights)\n self.fix_init_weight()\n\n if isinstance(self.head, nn.Linear):\n trunc_normal_(self.head.weight, std=.02)\n self.head.weight.data.mul_(init_scale)\n self.head.bias.data.mul_(init_scale)\n\n # setting a patch_dropout of 0. would mean it is disabled and this function would be the identity fn\n self.patch_dropout = PatchDropout(patch_dropout) if patch_dropout > 0. 
else nn.Identity()\n\n self.grad_checkpointing = grad_checkpointing\n\n def fix_init_weight(self):\n def rescale(param, layer_id):\n param.div_(math.sqrt(2.0 * layer_id))\n\n for layer_id, layer in enumerate(self.blocks):\n rescale(layer.attn.proj.weight.data, layer_id + 1)\n if self.naiveswiglu:\n rescale(layer.mlp.w3.weight.data, layer_id + 1)\n else:\n rescale(layer.mlp.fc2.weight.data, layer_id + 1)\n\n def get_cast_dtype(self) -> torch.dtype:\n return self.blocks[0].mlp.fc2.weight.dtype\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n def get_num_layers(self):\n return len(self.blocks)\n\n def lock(self, unlocked_groups=0, freeze_bn_stats=False):\n for param in self.parameters():\n param.requires_grad = False\n\n def _unlock(x):\n if isinstance(x, list):\n for g in x:\n _unlock(g)\n else:\n if isinstance(x, torch.nn.Parameter):\n x.requires_grad = True\n else:\n for p in x.parameters():\n p.requires_grad = True\n\n for blk in self.blocks[-unlocked_groups:]:\n _unlock(blk)\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n self.grad_checkpointing = enable\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'pos_embed', 'cls_token'}\n\n def get_classifier(self):\n return self.head\n\n def reset_classifier(self, num_classes, global_pool=''):\n self.num_classes = num_classes\n self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()\n\n def forward_features(self, x, return_all_features=False):\n bs, _, h, w = x.shape\n h = h // self.patch_embed.patch_size[0]\n w = w // self.patch_embed.patch_size[1]\n x = self.patch_embed(x)\n batch_size, seq_len, _ = x.size()\n\n cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks\n x = torch.cat((cls_tokens, x), dim=1)\n if self.pos_embed is not None:\n x = x + self.rescale_positional_embedding(out_size=(h, w))\n x = self.pos_drop(x)\n\n # a patch_dropout of 0. 
would mean it is disabled and this function would do nothing but return what was passed in\n if os.getenv('RoPE') == '1':\n if self.training and not isinstance(self.patch_dropout, nn.Identity):\n x, patch_indices_keep = self.patch_dropout(x)\n self.rope.forward = partial(self.rope.forward, patch_indices_keep=patch_indices_keep)\n else:\n self.rope.forward = partial(self.rope.forward, patch_indices_keep=None)\n x = self.patch_dropout(x)\n else:\n x = self.patch_dropout(x)\n\n rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None\n for blk in self.blocks:\n if self.grad_checkpointing:\n x = checkpoint(blk, x, (rel_pos_bias,))\n else:\n x = blk(x, rel_pos_bias=rel_pos_bias)\n\n if not return_all_features:\n x = self.norm(x)\n if self.fc_norm is not None:\n return self.fc_norm(x.mean(1))\n else:\n return x[:, 0]\n return x\n\n def post_attention(self, x, return_all_features=False):\n if not return_all_features:\n x = self.norm(x)\n if self.fc_norm is not None:\n return self.fc_norm(x.mean(1))\n else:\n return x[:, 0]\n return x\n\n def forward(self, x, return_all_features=False):\n if return_all_features:\n return self.forward_features(x, return_all_features)\n x = self.forward_features(x)\n x = self.head(x)\n return x\n\n def encode_dense(self, x, keep_shape=True):\n bs, _, h, w = x.shape\n h = h // self.patch_embed.patch_size[0]\n w = w // self.patch_embed.patch_size[1]\n x = self.patch_embed(x)\n batch_size, seq_len, _ = x.size()\n\n cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks\n x = torch.cat((cls_tokens, x), dim=1)\n if self.pos_embed is not None:\n x = x + self.rescale_positional_embedding(out_size=(h, w))\n x = self.pos_drop(x)\n\n # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in\n if os.getenv('RoPE') == '1':\n if self.training and not isinstance(self.patch_dropout, nn.Identity):\n x, patch_indices_keep = self.patch_dropout(x)\n self.rope.forward = partial(self.rope.forward, patch_indices_keep=patch_indices_keep)\n else:\n self.rope.forward = partial(self.rope.forward, patch_indices_keep=None)\n x = self.patch_dropout(x)\n else:\n x = self.patch_dropout(x)\n\n rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None\n for blk in self.blocks[:-1]:\n x = blk(x, rel_pos_bias=rel_pos_bias)\n x = self.blocks[-1].forward_without_attn(x)[:, 1:]\n x = self.norm(x)\n x = self.head(x)\n assert self.fc_norm is None\n\n x = F.normalize(x, dim=-1) # normalize along last dimension\n if keep_shape:\n x = x.view(bs, h, w, -1).permute(0, 3, 1, 2)\n return x\n\n def extract_roi_features(self, x, normed_boxes, **kwargs):\n x = self.encode_dense(x, keep_shape=True)\n\n return roi_align(x, self._denormalize_boxes(normed_boxes, x), (1, 1),\n 1.0, -1, True)[..., 0, 0]\n\n def rescale_positional_embedding(self, out_size):\n h, w = out_size\n if (h, w) == self.patch_embed.patch_shape:\n return self.pos_embed\n rescaled_positional_embedding = \\\n self.pos_embed.new_zeros(1, 1 + h*w, self.pos_embed.shape[2])\n rescaled_positional_embedding[0, 0] = self.pos_embed[0, 0]\n pe_2d = self.pos_embed[0, 1:].T.contiguous().view(\n 1, -1, *self.patch_embed.patch_shape)\n pe_2d = F.interpolate(pe_2d, out_size, mode='bicubic', align_corners=False).view(-1, h*w)\n rescaled_positional_embedding[0, 1:] = pe_2d.T.contiguous()\n\n return rescaled_positional_embedding\n\n def mask_pool(self, x, masks):\n feature_map = self.encode_dense(x, keep_shape=False)\n 
num_masks_per_image = [len(masks_per_image) for masks_per_image in masks]\n masks = torch.cat(masks).float().flatten(-2, -1) # bs, h*w\n feature_map = torch.repeat_interleave(\n feature_map, torch.tensor(num_masks_per_image, device=feature_map.device), dim=0)\n features = (feature_map * masks.unsqueeze(-1)).sum(1) / (masks.sum(1, keepdim=True) + 1e-12)\n\n return features\n\n @staticmethod\n def _denormalize_boxes(normed_boxes, x):\n h, w = x.shape[-2:]\n denormed_boxes = []\n for boxes in normed_boxes:\n new_boxes = boxes.clone() # FIXME: do not change the value in normed_boxes!\n new_boxes[:, [0, 2]] *= w\n new_boxes[:, [1, 3]] *= h\n denormed_boxes.append(new_boxes)\n return denormed_boxes\n\n def encode_rois_and_image(self, x, normed_boxes):\n bs, _, h, w = x.shape\n h = h // self.patch_embed.patch_size[0]\n w = w // self.patch_embed.patch_size[1]\n x = self.patch_embed(x)\n batch_size, seq_len, _ = x.size()\n\n cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks\n x = torch.cat((cls_tokens, x), dim=1)\n if self.pos_embed is not None:\n x = x + self.rescale_positional_embedding(out_size=(h, w))\n x = self.pos_drop(x)\n\n # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in\n if os.getenv('RoPE') == '1':\n if self.training and not isinstance(self.patch_dropout, nn.Identity):\n x, patch_indices_keep = self.patch_dropout(x)\n self.rope.forward = partial(self.rope.forward, patch_indices_keep=patch_indices_keep)\n else:\n self.rope.forward = partial(self.rope.forward, patch_indices_keep=None)\n x = self.patch_dropout(x)\n else:\n x = self.patch_dropout(x)\n\n rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None\n for blk in self.blocks[:-1]:\n x = blk(x, rel_pos_bias=rel_pos_bias)\n x_image = self.head(\n self.post_attention(\n self.blocks[-1](\n x, rel_pos_bias=rel_pos_bias)\n )\n )\n x_image = F.normalize(x_image, dim=-1)\n\n x = self.blocks[-1].forward_without_attn(x)[:, 1:]\n x = self.norm(x)\n x = self.head(x)\n assert self.fc_norm is None\n x = F.normalize(x, dim=-1) # normalize along last dimension\n x = x.view(bs, h, w, -1).permute(0, 3, 1, 2)\n x_rois = roi_align(x, self._denormalize_boxes(normed_boxes, x),\n (1, 1), 1.0, -1, True)[..., 0, 0]\n x_rois = F.normalize(x_rois, dim=-1)\n\n return x_rois, x_image" }, { "identifier": "LayerNorm", "path": "src/open_clip/eva_clip/transformer.py", "snippet": "class LayerNorm(nn.LayerNorm):\n \"\"\"Subclass torch's LayerNorm (with cast back to input dtype).\"\"\"\n\n def forward(self, x: torch.Tensor):\n orig_type = x.dtype\n x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)\n return x.to(orig_type)" }, { "identifier": "QuickGELU", "path": "src/open_clip/eva_clip/transformer.py", "snippet": "class QuickGELU(nn.Module):\n # NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory\n def forward(self, x: torch.Tensor):\n return x * torch.sigmoid(1.702 * x)" }, { "identifier": "Attention", "path": "src/open_clip/eva_clip/transformer.py", "snippet": "class Attention(nn.Module):\n def __init__(\n self,\n dim,\n num_heads=8,\n qkv_bias=True,\n scaled_cosine=False,\n scale_heads=False,\n logit_scale_max=math.log(1. 
/ 0.01),\n attn_drop=0.,\n proj_drop=0.,\n xattn=False,\n rope=False\n ):\n super().__init__()\n self.scaled_cosine = scaled_cosine\n self.scale_heads = scale_heads\n assert dim % num_heads == 0, 'dim should be divisible by num_heads'\n self.num_heads = num_heads\n self.head_dim = dim // num_heads\n self.scale = self.head_dim ** -0.5\n self.logit_scale_max = logit_scale_max\n\n # keeping in_proj in this form (instead of nn.Linear) to match weight scheme of original\n self.in_proj_weight = nn.Parameter(torch.randn((dim * 3, dim)) * self.scale)\n if qkv_bias:\n self.in_proj_bias = nn.Parameter(torch.zeros(dim * 3))\n else:\n self.in_proj_bias = None\n\n if self.scaled_cosine:\n self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))\n else:\n self.logit_scale = None\n self.attn_drop = nn.Dropout(attn_drop)\n if self.scale_heads:\n self.head_scale = nn.Parameter(torch.ones((num_heads, 1, 1)))\n else:\n self.head_scale = None\n self.out_proj = nn.Linear(dim, dim)\n self.out_drop = nn.Dropout(proj_drop)\n self.xattn = xattn\n self.xattn_drop = attn_drop\n self.rope = rope\n\n def forward(self, x, attn_mask: Optional[torch.Tensor] = None):\n L, N, C = x.shape\n q, k, v = F.linear(x, self.in_proj_weight, self.in_proj_bias).chunk(3, dim=-1)\n if self.xattn:\n q = q.contiguous().view(L, N, self.num_heads, -1).transpose(0, 1)\n k = k.contiguous().view(L, N, self.num_heads, -1).transpose(0, 1)\n v = v.contiguous().view(L, N, self.num_heads, -1).transpose(0, 1)\n\n x = xops.memory_efficient_attention(\n q, k, v,\n p=self.xattn_drop,\n scale=self.scale if self.logit_scale is None else None,\n attn_bias=xops.LowerTriangularMask() if attn_mask is not None else None,\n )\n else:\n q = q.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)\n k = k.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)\n v = v.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)\n\n if self.logit_scale is not None:\n attn = torch.bmm(F.normalize(q, dim=-1), F.normalize(k, dim=-1).transpose(-1, -2))\n logit_scale = torch.clamp(self.logit_scale, max=self.logit_scale_max).exp()\n attn = attn.view(N, self.num_heads, L, L) * logit_scale\n attn = attn.view(-1, L, L)\n else:\n q = q * self.scale\n attn = torch.bmm(q, k.transpose(-1, -2))\n\n if attn_mask is not None:\n if attn_mask.dtype == torch.bool:\n new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)\n new_attn_mask.masked_fill_(attn_mask, float(\"-inf\"))\n attn_mask = new_attn_mask\n attn += attn_mask\n\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n\n x = torch.bmm(attn, v)\n\n if self.head_scale is not None:\n x = x.view(N, self.num_heads, L, C) * self.head_scale\n x = x.view(-1, L, C)\n x = x.transpose(0, 1).reshape(L, N, C)\n x = self.out_proj(x)\n x = self.out_drop(x)\n return x" }, { "identifier": "VisionTransformer", "path": "src/open_clip/eva_clip/transformer.py", "snippet": "class VisionTransformer(nn.Module):\n def __init__(\n self,\n image_size: int,\n patch_size: int,\n width: int,\n layers: int,\n heads: int,\n mlp_ratio: float,\n ls_init_value: float = None,\n patch_dropout: float = 0.,\n global_average_pool: bool = False,\n output_dim: int = 512,\n act_layer: Callable = nn.GELU,\n norm_layer: Callable = LayerNorm,\n xattn: bool = False,\n ):\n super().__init__()\n self.image_size = to_2tuple(image_size)\n self.patch_size = to_2tuple(patch_size)\n self.grid_size = (self.image_size[0] // self.patch_size[0], self.image_size[1] // self.patch_size[1])\n self.output_dim = output_dim\n self.conv1 = 
nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)\n\n scale = width ** -0.5\n self.class_embedding = nn.Parameter(scale * torch.randn(width))\n self.positional_embedding = nn.Parameter(scale * torch.randn(self.grid_size[0] * self.grid_size[1] + 1, width))\n\n # setting a patch_dropout of 0. would mean it is disabled and this function would be the identity fn\n self.patch_dropout = PatchDropout(patch_dropout) if patch_dropout > 0. else nn.Identity()\n self.ln_pre = norm_layer(width)\n \n self.transformer = Transformer(\n width,\n layers,\n heads,\n mlp_ratio,\n ls_init_value=ls_init_value,\n act_layer=act_layer,\n norm_layer=norm_layer,\n xattn=xattn\n )\n\n self.global_average_pool = global_average_pool\n self.ln_post = norm_layer(width)\n self.proj = nn.Parameter(scale * torch.randn(width, output_dim))\n\n def lock(self, unlocked_groups=0, freeze_bn_stats=False):\n for param in self.parameters():\n param.requires_grad = False\n \n if unlocked_groups != 0:\n groups = [\n [\n self.conv1,\n self.class_embedding,\n self.positional_embedding,\n self.ln_pre,\n ],\n *self.transformer.resblocks[:-1],\n [\n self.transformer.resblocks[-1],\n self.ln_post,\n ],\n self.proj,\n ]\n\n def _unlock(x):\n if isinstance(x, Sequence):\n for g in x:\n _unlock(g)\n else:\n if isinstance(x, torch.nn.Parameter):\n x.requires_grad = True\n else:\n for p in x.parameters():\n p.requires_grad = True\n\n _unlock(groups[-unlocked_groups:])\n\n def get_num_layers(self):\n return self.transformer.layers\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n self.transformer.grad_checkpointing = enable\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'positional_embedding', 'class_embedding'}\n\n def forward(self, x: torch.Tensor, return_all_features: bool=False):\n x = self.conv1(x) # shape = [*, width, grid, grid]\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n x = torch.cat(\n [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x], dim=1) # shape = [*, grid ** 2 + 1, width]\n x = x + self.positional_embedding.to(x.dtype)\n\n # a patch_dropout of 0. 
would mean it is disabled and this function would do nothing but return what was passed in\n x = self.patch_dropout(x)\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n if not return_all_features:\n if self.global_average_pool:\n x = x.mean(dim=1) #x = x[:,1:,:].mean(dim=1)\n else:\n x = x[:, 0]\n\n x = self.ln_post(x)\n\n if self.proj is not None:\n x = x @ self.proj\n\n return x" }, { "identifier": "TextTransformer", "path": "src/open_clip/eva_clip/transformer.py", "snippet": "class TextTransformer(nn.Module):\n def __init__(\n self,\n context_length: int = 77,\n vocab_size: int = 49408,\n width: int = 512,\n heads: int = 8,\n layers: int = 12,\n ls_init_value: float = None,\n output_dim: int = 512,\n act_layer: Callable = nn.GELU,\n norm_layer: Callable = LayerNorm,\n xattn: bool= False,\n attn_mask: bool = True\n ):\n super().__init__()\n self.context_length = context_length\n self.vocab_size = vocab_size\n self.width = width\n self.output_dim = output_dim\n\n self.token_embedding = nn.Embedding(vocab_size, width)\n self.positional_embedding = nn.Parameter(torch.empty(self.context_length, width))\n self.transformer = Transformer(\n width=width,\n layers=layers,\n heads=heads,\n ls_init_value=ls_init_value,\n act_layer=act_layer,\n norm_layer=norm_layer,\n xattn=xattn\n )\n \n self.xattn = xattn\n self.ln_final = norm_layer(width)\n self.text_projection = nn.Parameter(torch.empty(width, output_dim))\n\n if attn_mask:\n self.register_buffer('attn_mask', self.build_attention_mask(), persistent=False)\n else:\n self.attn_mask = None\n\n self.init_parameters()\n\n def init_parameters(self):\n nn.init.normal_(self.token_embedding.weight, std=0.02)\n nn.init.normal_(self.positional_embedding, std=0.01)\n\n proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)\n attn_std = self.transformer.width ** -0.5\n fc_std = (2 * self.transformer.width) ** -0.5\n for block in self.transformer.resblocks:\n nn.init.normal_(block.attn.in_proj_weight, std=attn_std)\n nn.init.normal_(block.attn.out_proj.weight, std=proj_std)\n nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)\n nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)\n\n if self.text_projection is not None:\n nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n self.transformer.grad_checkpointing = enable\n \n @torch.jit.ignore\n def no_weight_decay(self):\n # return {'positional_embedding', 'token_embedding'}\n return {'positional_embedding'}\n\n def get_num_layers(self):\n return self.transformer.layers\n\n def build_attention_mask(self):\n # lazily create causal attention mask, with full attention between the vision tokens\n # pytorch uses additive attention mask; fill with -inf\n mask = torch.empty(self.context_length, self.context_length)\n mask.fill_(float(\"-inf\"))\n mask.triu_(1) # zero out the lower diagonal\n return mask\n\n def forward(self, text, return_all_features: bool=False):\n cast_dtype = self.transformer.get_cast_dtype()\n x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model]\n\n x = x + self.positional_embedding.to(cast_dtype)\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer(x, attn_mask=self.attn_mask)\n # x = self.transformer(x) # no attention mask is applied\n x = x.permute(1, 0, 2) # LND -> NLD\n x = self.ln_final(x)\n\n if not return_all_features:\n # x.shape = [batch_size, n_ctx, 
transformer.width]\n # take features from the eot embedding (eot_token is the highest number in each sequence)\n x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection\n return x\n\n def lock(self, *args, **kwargs):\n print(f'Freeze the text encoder', flush=True)\n for p in self.parameters():\n p.requires_grad = False" } ]
import os import numpy as np import torch import torch.nn.functional as F import xformers.ops as xops from dataclasses import dataclass from typing import Optional, Tuple, Union from functools import partial from torch import nn from .hf_model import HFTextEncoder from .modified_resnet import ModifiedResNet from .timm_model import TimmModel from .eva_vit_model import EVAVisionTransformer from .transformer import LayerNorm, QuickGELU, Attention, VisionTransformer, TextTransformer from apex.normalization import FusedLayerNorm
10,187
""" CLIP Model Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. """ try: except: HFTextEncoder = None try: except: FusedLayerNorm = LayerNorm print("Please 'pip install apex'") try: except ImportError: xops = None print("Please 'pip install xformers'") @dataclass class CLIPVisionCfg: layers: Union[Tuple[int, int, int, int], int] = 12 width: int = 768 head_width: int = 64 mlp_ratio: float = 4.0 patch_size: int = 16 image_size: Union[Tuple[int, int], int] = 224 ls_init_value: Optional[float] = None # layer scale initial value patch_dropout: float = 0. # what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results global_average_pool: bool = False # whether to global average pool the last embedding layer, instead of using CLS token (https://arxiv.org/abs/2205.01580) drop_path_rate: Optional[float] = None # drop path rate timm_model_name: str = None # a valid model name overrides layers, width, patch_size timm_model_pretrained: bool = False # use (imagenet) pretrained weights for named model timm_pool: str = 'avg' # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '') timm_proj: str = 'linear' # linear projection for timm model output ('linear', 'mlp', '') timm_proj_bias: bool = False # enable bias final projection eva_model_name: str = None # a valid eva model name overrides layers, width, patch_size qkv_bias: bool = True fusedLN: bool = False xattn: bool = False postnorm: bool = False rope: bool = False pt_hw_seq_len: int = 16 # 224/14 intp_freq: bool = False naiveswiglu: bool = False subln: bool = False @dataclass class CLIPTextCfg: context_length: int = 77 vocab_size: int = 49408 width: int = 512 heads: int = 8 layers: int = 12 ls_init_value: Optional[float] = None # layer scale initial value hf_model_name: str = None hf_tokenizer_name: str = None hf_model_pretrained: bool = True proj: str = 'mlp' pooler_type: str = 'mean_pooler' masked_language_modeling: bool = False fusedLN: bool = False xattn: bool = False attn_mask: bool = True def get_cast_dtype(precision: str): cast_dtype = None if precision == 'bf16': cast_dtype = torch.bfloat16 elif precision == 'fp16': cast_dtype = torch.float16 return cast_dtype def _build_vision_tower( embed_dim: int, vision_cfg: CLIPVisionCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None ): if isinstance(vision_cfg, dict): vision_cfg = CLIPVisionCfg(**vision_cfg) # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more # memory efficient in recent PyTorch releases (>= 1.10). # NOTE: timm models always use native GELU regardless of quick_gelu flag. act_layer = QuickGELU if quick_gelu else nn.GELU if vision_cfg.eva_model_name: vision_heads = vision_cfg.width // vision_cfg.head_width norm_layer = LayerNorm
""" CLIP Model Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. """ try: except: HFTextEncoder = None try: except: FusedLayerNorm = LayerNorm print("Please 'pip install apex'") try: except ImportError: xops = None print("Please 'pip install xformers'") @dataclass class CLIPVisionCfg: layers: Union[Tuple[int, int, int, int], int] = 12 width: int = 768 head_width: int = 64 mlp_ratio: float = 4.0 patch_size: int = 16 image_size: Union[Tuple[int, int], int] = 224 ls_init_value: Optional[float] = None # layer scale initial value patch_dropout: float = 0. # what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results global_average_pool: bool = False # whether to global average pool the last embedding layer, instead of using CLS token (https://arxiv.org/abs/2205.01580) drop_path_rate: Optional[float] = None # drop path rate timm_model_name: str = None # a valid model name overrides layers, width, patch_size timm_model_pretrained: bool = False # use (imagenet) pretrained weights for named model timm_pool: str = 'avg' # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '') timm_proj: str = 'linear' # linear projection for timm model output ('linear', 'mlp', '') timm_proj_bias: bool = False # enable bias final projection eva_model_name: str = None # a valid eva model name overrides layers, width, patch_size qkv_bias: bool = True fusedLN: bool = False xattn: bool = False postnorm: bool = False rope: bool = False pt_hw_seq_len: int = 16 # 224/14 intp_freq: bool = False naiveswiglu: bool = False subln: bool = False @dataclass class CLIPTextCfg: context_length: int = 77 vocab_size: int = 49408 width: int = 512 heads: int = 8 layers: int = 12 ls_init_value: Optional[float] = None # layer scale initial value hf_model_name: str = None hf_tokenizer_name: str = None hf_model_pretrained: bool = True proj: str = 'mlp' pooler_type: str = 'mean_pooler' masked_language_modeling: bool = False fusedLN: bool = False xattn: bool = False attn_mask: bool = True def get_cast_dtype(precision: str): cast_dtype = None if precision == 'bf16': cast_dtype = torch.bfloat16 elif precision == 'fp16': cast_dtype = torch.float16 return cast_dtype def _build_vision_tower( embed_dim: int, vision_cfg: CLIPVisionCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None ): if isinstance(vision_cfg, dict): vision_cfg = CLIPVisionCfg(**vision_cfg) # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more # memory efficient in recent PyTorch releases (>= 1.10). # NOTE: timm models always use native GELU regardless of quick_gelu flag. act_layer = QuickGELU if quick_gelu else nn.GELU if vision_cfg.eva_model_name: vision_heads = vision_cfg.width // vision_cfg.head_width norm_layer = LayerNorm
visual = EVAVisionTransformer(
2
2023-12-09 05:43:08+00:00
12k
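The record above ends here. As a hedged reading aid, the sketch below shows how a record with this field layout (repo_name, file_path, context entries carrying identifier/path/snippet, import_statement, cropped_code, next_line, gold_snippet_index) could be assembled into a next-line completion prompt. The TypedDict names, the build_prompt helper, and the tiny inline demo record are illustrative assumptions and not part of the dataset; the reading that context[gold_snippet_index] is the gold cross-file snippet for predicting next_line is inferred from the field names only.

from typing import List, TypedDict


class ContextSnippet(TypedDict):
    identifier: str
    path: str
    snippet: str


class Record(TypedDict):
    repo_name: str
    file_path: str
    context: List[ContextSnippet]
    import_statement: str
    cropped_code: str
    next_line: str
    gold_snippet_index: int


def build_prompt(record: Record) -> str:
    # Assumption: the snippet at gold_snippet_index is the cross-file context
    # most relevant to the completion; prepend it to the imports and the
    # in-file prefix (cropped_code). The model's target would be next_line.
    gold = record["context"][record["gold_snippet_index"]]
    return "\n\n".join([gold["snippet"], record["import_statement"], record["cropped_code"]])


if __name__ == "__main__":
    demo: Record = {
        "repo_name": "example/repo",  # placeholder values, not a real row
        "file_path": "pkg/module.py",
        "context": [{"identifier": "Helper", "path": "pkg/helper.py", "snippet": "class Helper: ..."}],
        "import_statement": "from pkg.helper import Helper",
        "cropped_code": "def make():\n    return ",
        "next_line": "Helper()",
        "gold_snippet_index": 0,
    }
    print(build_prompt(demo))
    print("completion target:", demo["next_line"])

For the record above, the completion target (next_line) is `visual = EVAVisionTransformer(`, which the field names suggest is the single source line that follows cropped_code.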
moonshot-admin/moonshot
third-party/pathspec-0.12.1/pathspec/gitignore.py
[ { "identifier": "PathSpec", "path": "third-party/pathspec-0.12.1/pathspec/pathspec.py", "snippet": "class PathSpec(object):\n\t\"\"\"\n\tThe :class:`PathSpec` class is a wrapper around a list of compiled\n\t:class:`.Pattern` instances.\n\t\"\"\"\n\n\tdef __init__(self, patterns: Iterable[Pattern]) -> None:\n\t\t\"\"\"\n\t\tInitializes the :class:`PathSpec` instance.\n\n\t\t*patterns* (:class:`~collections.abc.Collection` or :class:`~collections.abc.Iterable`)\n\t\tyields each compiled pattern (:class:`.Pattern`).\n\t\t\"\"\"\n\t\tif not isinstance(patterns, CollectionType):\n\t\t\tpatterns = list(patterns)\n\n\t\tself.patterns: Collection[Pattern] = patterns\n\t\t\"\"\"\n\t\t*patterns* (:class:`~collections.abc.Collection` of :class:`.Pattern`)\n\t\tcontains the compiled patterns.\n\t\t\"\"\"\n\n\tdef __eq__(self, other: object) -> bool:\n\t\t\"\"\"\n\t\tTests the equality of this path-spec with *other* (:class:`PathSpec`)\n\t\tby comparing their :attr:`~PathSpec.patterns` attributes.\n\t\t\"\"\"\n\t\tif isinstance(other, PathSpec):\n\t\t\tpaired_patterns = zip_longest(self.patterns, other.patterns)\n\t\t\treturn all(a == b for a, b in paired_patterns)\n\t\telse:\n\t\t\treturn NotImplemented\n\n\tdef __len__(self) -> int:\n\t\t\"\"\"\n\t\tReturns the number of compiled patterns this path-spec contains\n\t\t(:class:`int`).\n\t\t\"\"\"\n\t\treturn len(self.patterns)\n\n\tdef __add__(self: Self, other: \"PathSpec\") -> Self:\n\t\t\"\"\"\n\t\tCombines the :attr:`Pathspec.patterns` patterns from two\n\t\t:class:`PathSpec` instances.\n\t\t\"\"\"\n\t\tif isinstance(other, PathSpec):\n\t\t\treturn self.__class__(self.patterns + other.patterns)\n\t\telse:\n\t\t\treturn NotImplemented\n\n\tdef __iadd__(self: Self, other: \"PathSpec\") -> Self:\n\t\t\"\"\"\n\t\tAdds the :attr:`Pathspec.patterns` patterns from one :class:`PathSpec`\n\t\tinstance to this instance.\n\t\t\"\"\"\n\t\tif isinstance(other, PathSpec):\n\t\t\tself.patterns += other.patterns\n\t\t\treturn self\n\t\telse:\n\t\t\treturn NotImplemented\n\n\tdef check_file(\n\t\tself,\n\t\tfile: TStrPath,\n\t\tseparators: Optional[Collection[str]] = None,\n\t) -> CheckResult[TStrPath]:\n\t\t\"\"\"\n\t\tCheck the files against this path-spec.\n\n\t\t*file* (:class:`str` or :class:`os.PathLike`) is the file path to be\n\t\tmatched against :attr:`self.patterns <PathSpec.patterns>`.\n\n\t\t*separators* (:class:`~collections.abc.Collection` of :class:`str`; or\n\t\t:data:`None`) optionally contains the path separators to normalize. See\n\t\t:func:`~pathspec.util.normalize_file` for more information.\n\n\t\tReturns the file check result (:class:`~pathspec.util.CheckResult`).\n\t\t\"\"\"\n\t\tnorm_file = normalize_file(file, separators)\n\t\tinclude, index = self._match_file(enumerate(self.patterns), norm_file)\n\t\treturn CheckResult(file, include, index)\n\n\tdef check_files(\n\t\tself,\n\t\tfiles: Iterable[TStrPath],\n\t\tseparators: Optional[Collection[str]] = None,\n\t) -> Iterator[CheckResult[TStrPath]]:\n\t\t\"\"\"\n\t\tCheck the files against this path-spec.\n\n\t\t*files* (:class:`~collections.abc.Iterable` of :class:`str` or\n\t\t:class:`os.PathLike`) contains the file paths to be checked against\n\t\t:attr:`self.patterns <PathSpec.patterns>`.\n\n\t\t*separators* (:class:`~collections.abc.Collection` of :class:`str`; or\n\t\t:data:`None`) optionally contains the path separators to normalize. 
See\n\t\t:func:`~pathspec.util.normalize_file` for more information.\n\n\t\tReturns an :class:`~collections.abc.Iterator` yielding each file check\n\t\tresult (:class:`~pathspec.util.CheckResult`).\n\t\t\"\"\"\n\t\tif not _is_iterable(files):\n\t\t\traise TypeError(f\"files:{files!r} is not an iterable.\")\n\n\t\tuse_patterns = _filter_check_patterns(self.patterns)\n\t\tfor orig_file in files:\n\t\t\tnorm_file = normalize_file(orig_file, separators)\n\t\t\tinclude, index = self._match_file(use_patterns, norm_file)\n\t\t\tyield CheckResult(orig_file, include, index)\n\n\tdef check_tree_files(\n\t\tself,\n\t\troot: StrPath,\n\t\ton_error: Optional[Callable[[OSError], None]] = None,\n\t\tfollow_links: Optional[bool] = None,\n\t) -> Iterator[CheckResult[str]]:\n\t\t\"\"\"\n\t\tWalks the specified root path for all files and checks them against this\n\t\tpath-spec.\n\n\t\t*root* (:class:`str` or :class:`os.PathLike`) is the root directory to\n\t\tsearch for files.\n\n\t\t*on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally\n\t\tis the error handler for file-system exceptions. It will be called with the\n\t\texception (:exc:`OSError`). Reraise the exception to abort the walk. Default\n\t\tis :data:`None` to ignore file-system exceptions.\n\n\t\t*follow_links* (:class:`bool` or :data:`None`) optionally is whether to walk\n\t\tsymbolic links that resolve to directories. Default is :data:`None` for\n\t\t:data:`True`.\n\n\t\t*negate* (:class:`bool` or :data:`None`) is whether to negate the match\n\t\tresults of the patterns. If :data:`True`, a pattern matching a file will\n\t\texclude the file rather than include it. Default is :data:`None` for\n\t\t:data:`False`.\n\n\t\tReturns an :class:`~collections.abc.Iterator` yielding each file check\n\t\tresult (:class:`~pathspec.util.CheckResult`).\n\t\t\"\"\"\n\t\tfiles = util.iter_tree_files(root, on_error=on_error, follow_links=follow_links)\n\t\tyield from self.check_files(files)\n\n\t@classmethod\n\tdef from_lines(\n\t\tcls: Type[Self],\n\t\tpattern_factory: Union[str, Callable[[AnyStr], Pattern]],\n\t\tlines: Iterable[AnyStr],\n\t) -> Self:\n\t\t\"\"\"\n\t\tCompiles the pattern lines.\n\n\t\t*pattern_factory* can be either the name of a registered pattern factory\n\t\t(:class:`str`), or a :class:`~collections.abc.Callable` used to compile\n\t\tpatterns. It must accept an uncompiled pattern (:class:`str`) and return the\n\t\tcompiled pattern (:class:`.Pattern`).\n\n\t\t*lines* (:class:`~collections.abc.Iterable`) yields each uncompiled pattern\n\t\t(:class:`str`). 
This simply has to yield each line so that it can be a\n\t\t:class:`io.TextIOBase` (e.g., from :func:`open` or :class:`io.StringIO`) or\n\t\tthe result from :meth:`str.splitlines`.\n\n\t\tReturns the :class:`PathSpec` instance.\n\t\t\"\"\"\n\t\tif isinstance(pattern_factory, str):\n\t\t\tpattern_factory = util.lookup_pattern(pattern_factory)\n\n\t\tif not callable(pattern_factory):\n\t\t\traise TypeError(f\"pattern_factory:{pattern_factory!r} is not callable.\")\n\n\t\tif not _is_iterable(lines):\n\t\t\traise TypeError(f\"lines:{lines!r} is not an iterable.\")\n\n\t\tpatterns = [pattern_factory(line) for line in lines if line]\n\t\treturn cls(patterns)\n\n\tdef match_entries(\n\t\tself,\n\t\tentries: Iterable[TreeEntry],\n\t\tseparators: Optional[Collection[str]] = None,\n\t\t*,\n\t\tnegate: Optional[bool] = None,\n\t) -> Iterator[TreeEntry]:\n\t\t\"\"\"\n\t\tMatches the entries to this path-spec.\n\n\t\t*entries* (:class:`~collections.abc.Iterable` of :class:`~pathspec.util.TreeEntry`)\n\t\tcontains the entries to be matched against :attr:`self.patterns <PathSpec.patterns>`.\n\n\t\t*separators* (:class:`~collections.abc.Collection` of :class:`str`; or\n\t\t:data:`None`) optionally contains the path separators to normalize. See\n\t\t:func:`~pathspec.util.normalize_file` for more information.\n\n\t\t*negate* (:class:`bool` or :data:`None`) is whether to negate the match\n\t\tresults of the patterns. If :data:`True`, a pattern matching a file will\n\t\texclude the file rather than include it. Default is :data:`None` for\n\t\t:data:`False`.\n\n\t\tReturns the matched entries (:class:`~collections.abc.Iterator` of\n\t\t:class:`~pathspec.util.TreeEntry`).\n\t\t\"\"\"\n\t\tif not _is_iterable(entries):\n\t\t\traise TypeError(f\"entries:{entries!r} is not an iterable.\")\n\n\t\tuse_patterns = _filter_check_patterns(self.patterns)\n\t\tfor entry in entries:\n\t\t\tnorm_file = normalize_file(entry.path, separators)\n\t\t\tinclude, _index = self._match_file(use_patterns, norm_file)\n\n\t\t\tif negate:\n\t\t\t\tinclude = not include\n\n\t\t\tif include:\n\t\t\t\tyield entry\n\n\t_match_file = staticmethod(util.check_match_file)\n\t\"\"\"\n\tMatch files using the `check_match_file()` utility function. Subclasses may\n\toverride this method as an instance method. It does not have to be a static\n\tmethod. The signature for this method is subject to change.\n\t\"\"\"\n\n\tdef match_file(\n\t\tself,\n\t\tfile: StrPath,\n\t\tseparators: Optional[Collection[str]] = None,\n\t) -> bool:\n\t\t\"\"\"\n\t\tMatches the file to this path-spec.\n\n\t\t*file* (:class:`str` or :class:`os.PathLike`) is the file path to be\n\t\tmatched against :attr:`self.patterns <PathSpec.patterns>`.\n\n\t\t*separators* (:class:`~collections.abc.Collection` of :class:`str`)\n\t\toptionally contains the path separators to normalize. 
See\n\t\t:func:`~pathspec.util.normalize_file` for more information.\n\n\t\tReturns :data:`True` if *file* matched; otherwise, :data:`False`.\n\t\t\"\"\"\n\t\tnorm_file = normalize_file(file, separators)\n\t\tinclude, _index = self._match_file(enumerate(self.patterns), norm_file)\n\t\treturn bool(include)\n\n\tdef match_files(\n\t\tself,\n\t\tfiles: Iterable[StrPath],\n\t\tseparators: Optional[Collection[str]] = None,\n\t\t*,\n\t\tnegate: Optional[bool] = None,\n\t) -> Iterator[StrPath]:\n\t\t\"\"\"\n\t\tMatches the files to this path-spec.\n\n\t\t*files* (:class:`~collections.abc.Iterable` of :class:`str` or\n\t\t:class:`os.PathLike`) contains the file paths to be matched against\n\t\t:attr:`self.patterns <PathSpec.patterns>`.\n\n\t\t*separators* (:class:`~collections.abc.Collection` of :class:`str`; or\n\t\t:data:`None`) optionally contains the path separators to normalize. See\n\t\t:func:`~pathspec.util.normalize_file` for more information.\n\n\t\t*negate* (:class:`bool` or :data:`None`) is whether to negate the match\n\t\tresults of the patterns. If :data:`True`, a pattern matching a file will\n\t\texclude the file rather than include it. Default is :data:`None` for\n\t\t:data:`False`.\n\n\t\tReturns the matched files (:class:`~collections.abc.Iterator` of\n\t\t:class:`str` or :class:`os.PathLike`).\n\t\t\"\"\"\n\t\tif not _is_iterable(files):\n\t\t\traise TypeError(f\"files:{files!r} is not an iterable.\")\n\n\t\tuse_patterns = _filter_check_patterns(self.patterns)\n\t\tfor orig_file in files:\n\t\t\tnorm_file = normalize_file(orig_file, separators)\n\t\t\tinclude, _index = self._match_file(use_patterns, norm_file)\n\n\t\t\tif negate:\n\t\t\t\tinclude = not include\n\n\t\t\tif include:\n\t\t\t\tyield orig_file\n\n\tdef match_tree_entries(\n\t\tself,\n\t\troot: StrPath,\n\t\ton_error: Optional[Callable[[OSError], None]] = None,\n\t\tfollow_links: Optional[bool] = None,\n\t\t*,\n\t\tnegate: Optional[bool] = None,\n\t) -> Iterator[TreeEntry]:\n\t\t\"\"\"\n\t\tWalks the specified root path for all files and matches them to this\n\t\tpath-spec.\n\n\t\t*root* (:class:`str` or :class:`os.PathLike`) is the root directory to\n\t\tsearch.\n\n\t\t*on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally\n\t\tis the error handler for file-system exceptions. It will be called with the\n\t\texception (:exc:`OSError`). Reraise the exception to abort the walk. Default\n\t\tis :data:`None` to ignore file-system exceptions.\n\n\t\t*follow_links* (:class:`bool` or :data:`None`) optionally is whether to walk\n\t\tsymbolic links that resolve to directories. Default is :data:`None` for\n\t\t:data:`True`.\n\n\t\t*negate* (:class:`bool` or :data:`None`) is whether to negate the match\n\t\tresults of the patterns. If :data:`True`, a pattern matching a file will\n\t\texclude the file rather than include it. 
Default is :data:`None` for\n\t\t:data:`False`.\n\n\t\tReturns the matched files (:class:`~collections.abc.Iterator` of\n\t\t:class:`.TreeEntry`).\n\t\t\"\"\"\n\t\tentries = util.iter_tree_entries(root, on_error=on_error, follow_links=follow_links)\n\t\tyield from self.match_entries(entries, negate=negate)\n\n\tdef match_tree_files(\n\t\tself,\n\t\troot: StrPath,\n\t\ton_error: Optional[Callable[[OSError], None]] = None,\n\t\tfollow_links: Optional[bool] = None,\n\t\t*,\n\t\tnegate: Optional[bool] = None,\n\t) -> Iterator[str]:\n\t\t\"\"\"\n\t\tWalks the specified root path for all files and matches them to this\n\t\tpath-spec.\n\n\t\t*root* (:class:`str` or :class:`os.PathLike`) is the root directory to\n\t\tsearch for files.\n\n\t\t*on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally\n\t\tis the error handler for file-system exceptions. It will be called with the\n\t\texception (:exc:`OSError`). Reraise the exception to abort the walk. Default\n\t\tis :data:`None` to ignore file-system exceptions.\n\n\t\t*follow_links* (:class:`bool` or :data:`None`) optionally is whether to walk\n\t\tsymbolic links that resolve to directories. Default is :data:`None` for\n\t\t:data:`True`.\n\n\t\t*negate* (:class:`bool` or :data:`None`) is whether to negate the match\n\t\tresults of the patterns. If :data:`True`, a pattern matching a file will\n\t\texclude the file rather than include it. Default is :data:`None` for\n\t\t:data:`False`.\n\n\t\tReturns the matched files (:class:`~collections.abc.Iterable` of\n\t\t:class:`str`).\n\t\t\"\"\"\n\t\tfiles = util.iter_tree_files(root, on_error=on_error, follow_links=follow_links)\n\t\tyield from self.match_files(files, negate=negate)\n\n\t# Alias `match_tree_files()` as `match_tree()` for backward compatibility\n\t# before v0.3.2.\n\tmatch_tree = match_tree_files" }, { "identifier": "Pattern", "path": "third-party/pathspec-0.12.1/pathspec/pattern.py", "snippet": "class Pattern(object):\n\t\"\"\"\n\tThe :class:`Pattern` class is the abstract definition of a pattern.\n\t\"\"\"\n\n\t# Make the class dict-less.\n\t__slots__ = (\n\t\t'include',\n\t)\n\n\tdef __init__(self, include: Optional[bool]) -> None:\n\t\t\"\"\"\n\t\tInitializes the :class:`Pattern` instance.\n\n\t\t*include* (:class:`bool` or :data:`None`) is whether the matched files\n\t\tshould be included (:data:`True`), excluded (:data:`False`), or is a\n\t\tnull-operation (:data:`None`).\n\t\t\"\"\"\n\n\t\tself.include = include\n\t\t\"\"\"\n\t\t*include* (:class:`bool` or :data:`None`) is whether the matched files\n\t\tshould be included (:data:`True`), excluded (:data:`False`), or is a\n\t\tnull-operation (:data:`None`).\n\t\t\"\"\"\n\n\tdef match(self, files: Iterable[str]) -> Iterator[str]:\n\t\t\"\"\"\n\t\tDEPRECATED: This method is no longer used and has been replaced by\n\t\t:meth:`.match_file`. Use the :meth:`.match_file` method with a loop for\n\t\tsimilar results.\n\n\t\tMatches this pattern against the specified files.\n\n\t\t*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains each\n\t\tfile relative to the root directory (e.g., ``\"relative/path/to/file\"``).\n\n\t\tReturns an :class:`~collections.abc.Iterable` yielding each matched file\n\t\tpath (:class:`str`).\n\t\t\"\"\"\n\t\twarnings.warn((\n\t\t\t\"{cls.__module__}.{cls.__qualname__}.match() is deprecated. 
Use \"\n\t\t\t\"{cls.__module__}.{cls.__qualname__}.match_file() with a loop for \"\n\t\t\t\"similar results.\"\n\t\t).format(cls=self.__class__), DeprecationWarning, stacklevel=2)\n\n\t\tfor file in files:\n\t\t\tif self.match_file(file) is not None:\n\t\t\t\tyield file\n\n\tdef match_file(self, file: str) -> Optional[Any]:\n\t\t\"\"\"\n\t\tMatches this pattern against the specified file.\n\n\t\t*file* (:class:`str`) is the normalized file path to match against.\n\n\t\tReturns the match result if *file* matched; otherwise, :data:`None`.\n\t\t\"\"\"\n\t\traise NotImplementedError((\n\t\t\t\"{cls.__module__}.{cls.__qualname__} must override match_file().\"\n\t\t).format(cls=self.__class__))" }, { "identifier": "GitWildMatchPattern", "path": "third-party/pathspec-0.12.1/pathspec/patterns/gitwildmatch.py", "snippet": "class GitWildMatchPattern(RegexPattern):\n\t\"\"\"\n\tThe :class:`GitWildMatchPattern` class represents a compiled Git wildmatch\n\tpattern.\n\t\"\"\"\n\n\t# Keep the dict-less class hierarchy.\n\t__slots__ = ()\n\n\t@classmethod\n\tdef pattern_to_regex(\n\t\tcls,\n\t\tpattern: AnyStr,\n\t) -> Tuple[Optional[AnyStr], Optional[bool]]:\n\t\t\"\"\"\n\t\tConvert the pattern into a regular expression.\n\n\t\t*pattern* (:class:`str` or :class:`bytes`) is the pattern to convert into a\n\t\tregular expression.\n\n\t\tReturns the uncompiled regular expression (:class:`str`, :class:`bytes`, or\n\t\t:data:`None`); and whether matched files should be included (:data:`True`),\n\t\texcluded (:data:`False`), or if it is a null-operation (:data:`None`).\n\t\t\"\"\"\n\t\tif isinstance(pattern, str):\n\t\t\treturn_type = str\n\t\telif isinstance(pattern, bytes):\n\t\t\treturn_type = bytes\n\t\t\tpattern = pattern.decode(_BYTES_ENCODING)\n\t\telse:\n\t\t\traise TypeError(f\"pattern:{pattern!r} is not a unicode or byte string.\")\n\n\t\toriginal_pattern = pattern\n\n\t\tif pattern.endswith('\\\\ '):\n\t\t\t# EDGE CASE: Spaces can be escaped with backslash. If a pattern that ends\n\t\t\t# with backslash followed by a space, only strip from left.\n\t\t\tpattern = pattern.lstrip()\n\t\telse:\n\t\t\tpattern = pattern.strip()\n\n\t\tif pattern.startswith('#'):\n\t\t\t# A pattern starting with a hash ('#') serves as a comment (neither\n\t\t\t# includes nor excludes files). Escape the hash with a back-slash to match\n\t\t\t# a literal hash (i.e., '\\#').\n\t\t\tregex = None\n\t\t\tinclude = None\n\n\t\telif pattern == '/':\n\t\t\t# EDGE CASE: According to `git check-ignore` (v2.4.1), a single '/' does\n\t\t\t# not match any file.\n\t\t\tregex = None\n\t\t\tinclude = None\n\n\t\telif pattern:\n\t\t\tif pattern.startswith('!'):\n\t\t\t\t# A pattern starting with an exclamation mark ('!') negates the pattern\n\t\t\t\t# (exclude instead of include). Escape the exclamation mark with a\n\t\t\t\t# back-slash to match a literal exclamation mark (i.e., '\\!').\n\t\t\t\tinclude = False\n\t\t\t\t# Remove leading exclamation mark.\n\t\t\t\tpattern = pattern[1:]\n\t\t\telse:\n\t\t\t\tinclude = True\n\n\t\t\t# Allow a regex override for edge cases that cannot be handled through\n\t\t\t# normalization.\n\t\t\toverride_regex = None\n\n\t\t\t# Split pattern into segments.\n\t\t\tpattern_segs = pattern.split('/')\n\n\t\t\t# Check whether the pattern is specifically a directory pattern before\n\t\t\t# normalization.\n\t\t\tis_dir_pattern = not pattern_segs[-1]\n\n\t\t\t# Normalize pattern to make processing easier.\n\n\t\t\t# EDGE CASE: Deal with duplicate double-asterisk sequences. 
Collapse each\n\t\t\t# sequence down to one double-asterisk. Iterate over the segments in\n\t\t\t# reverse and remove the duplicate double asterisks as we go.\n\t\t\tfor i in range(len(pattern_segs) - 1, 0, -1):\n\t\t\t\tprev = pattern_segs[i-1]\n\t\t\t\tseg = pattern_segs[i]\n\t\t\t\tif prev == '**' and seg == '**':\n\t\t\t\t\tdel pattern_segs[i]\n\n\t\t\tif len(pattern_segs) == 2 and pattern_segs[0] == '**' and not pattern_segs[1]:\n\t\t\t\t# EDGE CASE: The '**/' pattern should match everything except individual\n\t\t\t\t# files in the root directory. This case cannot be adequately handled\n\t\t\t\t# through normalization. Use the override.\n\t\t\t\toverride_regex = f'^.+(?P<{_DIR_MARK}>/).*$'\n\n\t\t\tif not pattern_segs[0]:\n\t\t\t\t# A pattern beginning with a slash ('/') will only match paths directly\n\t\t\t\t# on the root directory instead of any descendant paths. So, remove\n\t\t\t\t# empty first segment to make pattern relative to root.\n\t\t\t\tdel pattern_segs[0]\n\n\t\t\telif len(pattern_segs) == 1 or (len(pattern_segs) == 2 and not pattern_segs[1]):\n\t\t\t\t# A single pattern without a beginning slash ('/') will match any\n\t\t\t\t# descendant path. This is equivalent to \"**/{pattern}\". So, prepend\n\t\t\t\t# with double-asterisks to make pattern relative to root.\n\t\t\t\t# - EDGE CASE: This also holds for a single pattern with a trailing\n\t\t\t\t# slash (e.g. dir/).\n\t\t\t\tif pattern_segs[0] != '**':\n\t\t\t\t\tpattern_segs.insert(0, '**')\n\n\t\t\telse:\n\t\t\t\t# EDGE CASE: A pattern without a beginning slash ('/') but contains at\n\t\t\t\t# least one prepended directory (e.g. \"dir/{pattern}\") should not match\n\t\t\t\t# \"**/dir/{pattern}\", according to `git check-ignore` (v2.4.1).\n\t\t\t\tpass\n\n\t\t\tif not pattern_segs:\n\t\t\t\t# After resolving the edge cases, we end up with no pattern at all. This\n\t\t\t\t# must be because the pattern is invalid.\n\t\t\t\traise GitWildMatchPatternError(f\"Invalid git pattern: {original_pattern!r}\")\n\n\t\t\tif not pattern_segs[-1] and len(pattern_segs) > 1:\n\t\t\t\t# A pattern ending with a slash ('/') will match all descendant paths if\n\t\t\t\t# it is a directory but not if it is a regular file. This is equivalent\n\t\t\t\t# to \"{pattern}/**\". 
So, set last segment to a double-asterisk to\n\t\t\t\t# include all descendants.\n\t\t\t\tpattern_segs[-1] = '**'\n\n\t\t\tif override_regex is None:\n\t\t\t\t# Build regular expression from pattern.\n\t\t\t\toutput = ['^']\n\t\t\t\tneed_slash = False\n\t\t\t\tend = len(pattern_segs) - 1\n\t\t\t\tfor i, seg in enumerate(pattern_segs):\n\t\t\t\t\tif seg == '**':\n\t\t\t\t\t\tif i == 0 and i == end:\n\t\t\t\t\t\t\t# A pattern consisting solely of double-asterisks ('**') will\n\t\t\t\t\t\t\t# match every path.\n\t\t\t\t\t\t\toutput.append(f'[^/]+(?:/.*)?')\n\n\t\t\t\t\t\telif i == 0:\n\t\t\t\t\t\t\t# A normalized pattern beginning with double-asterisks\n\t\t\t\t\t\t\t# ('**') will match any leading path segments.\n\t\t\t\t\t\t\toutput.append('(?:.+/)?')\n\t\t\t\t\t\t\tneed_slash = False\n\n\t\t\t\t\t\telif i == end:\n\t\t\t\t\t\t\t# A normalized pattern ending with double-asterisks ('**') will\n\t\t\t\t\t\t\t# match any trailing path segments.\n\t\t\t\t\t\t\tif is_dir_pattern:\n\t\t\t\t\t\t\t\toutput.append(f'(?P<{_DIR_MARK}>/).*')\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\toutput.append(f'/.*')\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# A pattern with inner double-asterisks ('**') will match multiple\n\t\t\t\t\t\t\t# (or zero) inner path segments.\n\t\t\t\t\t\t\toutput.append('(?:/.+)?')\n\t\t\t\t\t\t\tneed_slash = True\n\n\t\t\t\t\telif seg == '*':\n\t\t\t\t\t\t# Match single path segment.\n\t\t\t\t\t\tif need_slash:\n\t\t\t\t\t\t\toutput.append('/')\n\n\t\t\t\t\t\toutput.append('[^/]+')\n\n\t\t\t\t\t\tif i == end:\n\t\t\t\t\t\t\t# A pattern ending without a slash ('/') will match a file or a\n\t\t\t\t\t\t\t# directory (with paths underneath it). E.g., \"foo\" matches \"foo\",\n\t\t\t\t\t\t\t# \"foo/bar\", \"foo/bar/baz\", etc.\n\t\t\t\t\t\t\toutput.append(f'(?:(?P<{_DIR_MARK}>/).*)?')\n\n\t\t\t\t\t\tneed_slash = True\n\n\t\t\t\t\telse:\n\t\t\t\t\t\t# Match segment glob pattern.\n\t\t\t\t\t\tif need_slash:\n\t\t\t\t\t\t\toutput.append('/')\n\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\toutput.append(cls._translate_segment_glob(seg))\n\t\t\t\t\t\texcept ValueError as e:\n\t\t\t\t\t\t\traise GitWildMatchPatternError(f\"Invalid git pattern: {original_pattern!r}\") from e\n\n\t\t\t\t\t\tif i == end:\n\t\t\t\t\t\t\t# A pattern ending without a slash ('/') will match a file or a\n\t\t\t\t\t\t\t# directory (with paths underneath it). E.g., \"foo\" matches \"foo\",\n\t\t\t\t\t\t\t# \"foo/bar\", \"foo/bar/baz\", etc.\n\t\t\t\t\t\t\toutput.append(f'(?:(?P<{_DIR_MARK}>/).*)?')\n\n\t\t\t\t\t\tneed_slash = True\n\n\t\t\t\toutput.append('$')\n\t\t\t\tregex = ''.join(output)\n\n\t\t\telse:\n\t\t\t\t# Use regex override.\n\t\t\t\tregex = override_regex\n\n\t\telse:\n\t\t\t# A blank pattern is a null-operation (neither includes nor excludes\n\t\t\t# files).\n\t\t\tregex = None\n\t\t\tinclude = None\n\n\t\tif regex is not None and return_type is bytes:\n\t\t\tregex = regex.encode(_BYTES_ENCODING)\n\n\t\treturn regex, include\n\n\t@staticmethod\n\tdef _translate_segment_glob(pattern: str) -> str:\n\t\t\"\"\"\n\t\tTranslates the glob pattern to a regular expression. 
This is used in the\n\t\tconstructor to translate a path segment glob pattern to its corresponding\n\t\tregular expression.\n\n\t\t*pattern* (:class:`str`) is the glob pattern.\n\n\t\tReturns the regular expression (:class:`str`).\n\t\t\"\"\"\n\t\t# NOTE: This is derived from `fnmatch.translate()` and is similar to the\n\t\t# POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set.\n\n\t\tescape = False\n\t\tregex = ''\n\t\ti, end = 0, len(pattern)\n\t\twhile i < end:\n\t\t\t# Get next character.\n\t\t\tchar = pattern[i]\n\t\t\ti += 1\n\n\t\t\tif escape:\n\t\t\t\t# Escape the character.\n\t\t\t\tescape = False\n\t\t\t\tregex += re.escape(char)\n\n\t\t\telif char == '\\\\':\n\t\t\t\t# Escape character, escape next character.\n\t\t\t\tescape = True\n\n\t\t\telif char == '*':\n\t\t\t\t# Multi-character wildcard. Match any string (except slashes), including\n\t\t\t\t# an empty string.\n\t\t\t\tregex += '[^/]*'\n\n\t\t\telif char == '?':\n\t\t\t\t# Single-character wildcard. Match any single character (except a\n\t\t\t\t# slash).\n\t\t\t\tregex += '[^/]'\n\n\t\t\telif char == '[':\n\t\t\t\t# Bracket expression wildcard. Except for the beginning exclamation\n\t\t\t\t# mark, the whole bracket expression can be used directly as regex, but\n\t\t\t\t# we have to find where the expression ends.\n\t\t\t\t# - \"[][!]\" matches ']', '[' and '!'.\n\t\t\t\t# - \"[]-]\" matches ']' and '-'.\n\t\t\t\t# - \"[!]a-]\" matches any character except ']', 'a' and '-'.\n\t\t\t\tj = i\n\n\t\t\t\t# Pass bracket expression negation.\n\t\t\t\tif j < end and (pattern[j] == '!' or pattern[j] == '^'):\n\t\t\t\t\tj += 1\n\n\t\t\t\t# Pass first closing bracket if it is at the beginning of the\n\t\t\t\t# expression.\n\t\t\t\tif j < end and pattern[j] == ']':\n\t\t\t\t\tj += 1\n\n\t\t\t\t# Find closing bracket. Stop once we reach the end or find it.\n\t\t\t\twhile j < end and pattern[j] != ']':\n\t\t\t\t\tj += 1\n\n\t\t\t\tif j < end:\n\t\t\t\t\t# Found end of bracket expression. Increment j to be one past the\n\t\t\t\t\t# closing bracket:\n\t\t\t\t\t#\n\t\t\t\t\t# [...]\n\t\t\t\t\t# ^ ^\n\t\t\t\t\t# i j\n\t\t\t\t\t#\n\t\t\t\t\tj += 1\n\t\t\t\t\texpr = '['\n\n\t\t\t\t\tif pattern[i] == '!':\n\t\t\t\t\t\t# Bracket expression needs to be negated.\n\t\t\t\t\t\texpr += '^'\n\t\t\t\t\t\ti += 1\n\t\t\t\t\telif pattern[i] == '^':\n\t\t\t\t\t\t# POSIX declares that the regex bracket expression negation \"[^...]\"\n\t\t\t\t\t\t# is undefined in a glob pattern. Python's `fnmatch.translate()`\n\t\t\t\t\t\t# escapes the caret ('^') as a literal. Git supports the using a\n\t\t\t\t\t\t# caret for negation. Maintain consistency with Git because that is\n\t\t\t\t\t\t# the expected behavior.\n\t\t\t\t\t\texpr += '^'\n\t\t\t\t\t\ti += 1\n\n\t\t\t\t\t# Build regex bracket expression. 
Escape slashes so they are treated\n\t\t\t\t\t# as literal slashes by regex as defined by POSIX.\n\t\t\t\t\texpr += pattern[i:j].replace('\\\\', '\\\\\\\\')\n\n\t\t\t\t\t# Add regex bracket expression to regex result.\n\t\t\t\t\tregex += expr\n\n\t\t\t\t\t# Set i to one past the closing bracket.\n\t\t\t\t\ti = j\n\n\t\t\t\telse:\n\t\t\t\t\t# Failed to find closing bracket, treat opening bracket as a bracket\n\t\t\t\t\t# literal instead of as an expression.\n\t\t\t\t\tregex += '\\\\['\n\n\t\t\telse:\n\t\t\t\t# Regular character, escape it for regex.\n\t\t\t\tregex += re.escape(char)\n\n\t\tif escape:\n\t\t\traise ValueError(f\"Escape character found with no next character to escape: {pattern!r}\")\n\n\t\treturn regex\n\n\t@staticmethod\n\tdef escape(s: AnyStr) -> AnyStr:\n\t\t\"\"\"\n\t\tEscape special characters in the given string.\n\n\t\t*s* (:class:`str` or :class:`bytes`) a filename or a string that you want to\n\t\tescape, usually before adding it to a \".gitignore\".\n\n\t\tReturns the escaped string (:class:`str` or :class:`bytes`).\n\t\t\"\"\"\n\t\tif isinstance(s, str):\n\t\t\treturn_type = str\n\t\t\tstring = s\n\t\telif isinstance(s, bytes):\n\t\t\treturn_type = bytes\n\t\t\tstring = s.decode(_BYTES_ENCODING)\n\t\telse:\n\t\t\traise TypeError(f\"s:{s!r} is not a unicode or byte string.\")\n\n\t\t# Reference: https://git-scm.com/docs/gitignore#_pattern_format\n\t\tmeta_characters = r\"[]!*#?\"\n\n\t\tout_string = \"\".join(\"\\\\\" + x if x in meta_characters else x for x in string)\n\n\t\tif return_type is bytes:\n\t\t\treturn out_string.encode(_BYTES_ENCODING)\n\t\telse:\n\t\t\treturn out_string" }, { "identifier": "_DIR_MARK", "path": "third-party/pathspec-0.12.1/pathspec/patterns/gitwildmatch.py", "snippet": "_DIR_MARK = 'ps_d'" }, { "identifier": "_is_iterable", "path": "third-party/pathspec-0.12.1/pathspec/util.py", "snippet": "def _is_iterable(value: Any) -> bool:\n\t\"\"\"\n\tCheck whether the value is an iterable (excludes strings).\n\n\t*value* is the value to check,\n\n\tReturns whether *value* is a iterable (:class:`bool`).\n\t\"\"\"\n\treturn isinstance(value, IterableType) and not isinstance(value, (str, bytes))" } ]
from typing import ( AnyStr, Callable, # Replaced by `collections.abc.Callable` in 3.9. Iterable, # Replaced by `collections.abc.Iterable` in 3.9. Optional, # Replaced by `X | None` in 3.10. Tuple, # Replaced by `tuple` in 3.9. Type, # Replaced by `type` in 3.9. TypeVar, Union, # Replaced by `X | Y` in 3.10. cast, overload) from .pathspec import ( PathSpec) from .pattern import ( Pattern) from .patterns.gitwildmatch import ( GitWildMatchPattern, _DIR_MARK) from .util import ( _is_iterable)
9,145
""" This module provides :class:`.GitIgnoreSpec` which replicates *.gitignore* behavior. """ Self = TypeVar("Self", bound="GitIgnoreSpec") """ :class:`GitIgnoreSpec` self type hint to support Python v<3.11 using PEP 673 recommendation. """ class GitIgnoreSpec(PathSpec): """ The :class:`GitIgnoreSpec` class extends :class:`pathspec.pathspec.PathSpec` to replicate *.gitignore* behavior. """ def __eq__(self, other: object) -> bool: """ Tests the equality of this gitignore-spec with *other* (:class:`GitIgnoreSpec`) by comparing their :attr:`~pathspec.pattern.Pattern` attributes. A non-:class:`GitIgnoreSpec` will not compare equal. """ if isinstance(other, GitIgnoreSpec): return super().__eq__(other) elif isinstance(other, PathSpec): return False else: return NotImplemented # Support reversed order of arguments from PathSpec. @overload @classmethod def from_lines( cls: Type[Self], pattern_factory: Union[str, Callable[[AnyStr], Pattern]], lines: Iterable[AnyStr], ) -> Self: ... @overload @classmethod def from_lines( cls: Type[Self], lines: Iterable[AnyStr], pattern_factory: Union[str, Callable[[AnyStr], Pattern], None] = None, ) -> Self: ... @classmethod def from_lines( cls: Type[Self], lines: Iterable[AnyStr], pattern_factory: Union[str, Callable[[AnyStr], Pattern], None] = None, ) -> Self: """ Compiles the pattern lines. *lines* (:class:`~collections.abc.Iterable`) yields each uncompiled pattern (:class:`str`). This simply has to yield each line so it can be a :class:`io.TextIOBase` (e.g., from :func:`open` or :class:`io.StringIO`) or the result from :meth:`str.splitlines`. *pattern_factory* can be :data:`None`, the name of a registered pattern factory (:class:`str`), or a :class:`~collections.abc.Callable` used to compile patterns. The callable must accept an uncompiled pattern (:class:`str`) and return the compiled pattern (:class:`pathspec.pattern.Pattern`). Default is :data:`None` for :class:`.GitWildMatchPattern`). Returns the :class:`GitIgnoreSpec` instance. """ if pattern_factory is None: pattern_factory = GitWildMatchPattern elif (isinstance(lines, (str, bytes)) or callable(lines)) and _is_iterable(pattern_factory): # Support reversed order of arguments from PathSpec. pattern_factory, lines = lines, pattern_factory self = super().from_lines(pattern_factory, lines) return cast(Self, self) @staticmethod def _match_file( patterns: Iterable[Tuple[int, GitWildMatchPattern]], file: str, ) -> Tuple[Optional[bool], Optional[int]]: """ Check the file against the patterns. .. NOTE:: Subclasses of :class:`~pathspec.pathspec.PathSpec` may override this method as an instance method. It does not have to be a static method. The signature for this method is subject to change. *patterns* (:class:`~collections.abc.Iterable`) yields each indexed pattern (:class:`tuple`) which contains the pattern index (:class:`int`) and actual pattern (:class:`~pathspec.pattern.Pattern`). *file* (:class:`str`) is the normalized file path to be matched against *patterns*. Returns a :class:`tuple` containing whether to include *file* (:class:`bool` or :data:`None`), and the index of the last matched pattern (:class:`int` or :data:`None`). """ out_include: Optional[bool] = None out_index: Optional[int] = None out_priority = 0 for index, pattern in patterns: if pattern.include is not None: match = pattern.match_file(file) if match is not None: # Pattern matched. # Check for directory marker.
""" This module provides :class:`.GitIgnoreSpec` which replicates *.gitignore* behavior. """ Self = TypeVar("Self", bound="GitIgnoreSpec") """ :class:`GitIgnoreSpec` self type hint to support Python v<3.11 using PEP 673 recommendation. """ class GitIgnoreSpec(PathSpec): """ The :class:`GitIgnoreSpec` class extends :class:`pathspec.pathspec.PathSpec` to replicate *.gitignore* behavior. """ def __eq__(self, other: object) -> bool: """ Tests the equality of this gitignore-spec with *other* (:class:`GitIgnoreSpec`) by comparing their :attr:`~pathspec.pattern.Pattern` attributes. A non-:class:`GitIgnoreSpec` will not compare equal. """ if isinstance(other, GitIgnoreSpec): return super().__eq__(other) elif isinstance(other, PathSpec): return False else: return NotImplemented # Support reversed order of arguments from PathSpec. @overload @classmethod def from_lines( cls: Type[Self], pattern_factory: Union[str, Callable[[AnyStr], Pattern]], lines: Iterable[AnyStr], ) -> Self: ... @overload @classmethod def from_lines( cls: Type[Self], lines: Iterable[AnyStr], pattern_factory: Union[str, Callable[[AnyStr], Pattern], None] = None, ) -> Self: ... @classmethod def from_lines( cls: Type[Self], lines: Iterable[AnyStr], pattern_factory: Union[str, Callable[[AnyStr], Pattern], None] = None, ) -> Self: """ Compiles the pattern lines. *lines* (:class:`~collections.abc.Iterable`) yields each uncompiled pattern (:class:`str`). This simply has to yield each line so it can be a :class:`io.TextIOBase` (e.g., from :func:`open` or :class:`io.StringIO`) or the result from :meth:`str.splitlines`. *pattern_factory* can be :data:`None`, the name of a registered pattern factory (:class:`str`), or a :class:`~collections.abc.Callable` used to compile patterns. The callable must accept an uncompiled pattern (:class:`str`) and return the compiled pattern (:class:`pathspec.pattern.Pattern`). Default is :data:`None` for :class:`.GitWildMatchPattern`). Returns the :class:`GitIgnoreSpec` instance. """ if pattern_factory is None: pattern_factory = GitWildMatchPattern elif (isinstance(lines, (str, bytes)) or callable(lines)) and _is_iterable(pattern_factory): # Support reversed order of arguments from PathSpec. pattern_factory, lines = lines, pattern_factory self = super().from_lines(pattern_factory, lines) return cast(Self, self) @staticmethod def _match_file( patterns: Iterable[Tuple[int, GitWildMatchPattern]], file: str, ) -> Tuple[Optional[bool], Optional[int]]: """ Check the file against the patterns. .. NOTE:: Subclasses of :class:`~pathspec.pathspec.PathSpec` may override this method as an instance method. It does not have to be a static method. The signature for this method is subject to change. *patterns* (:class:`~collections.abc.Iterable`) yields each indexed pattern (:class:`tuple`) which contains the pattern index (:class:`int`) and actual pattern (:class:`~pathspec.pattern.Pattern`). *file* (:class:`str`) is the normalized file path to be matched against *patterns*. Returns a :class:`tuple` containing whether to include *file* (:class:`bool` or :data:`None`), and the index of the last matched pattern (:class:`int` or :data:`None`). """ out_include: Optional[bool] = None out_index: Optional[int] = None out_priority = 0 for index, pattern in patterns: if pattern.include is not None: match = pattern.match_file(file) if match is not None: # Pattern matched. # Check for directory marker.
dir_mark = match.match.groupdict().get(_DIR_MARK)
3
2023-12-14 07:43:03+00:00
12k
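For readers unfamiliar with the pathspec API shown in the record above, here is a minimal usage sketch. It assumes `GitIgnoreSpec` is re-exported at the package top level and that the inherited `PathSpec.match_file` helper is available; the patterns and file names are illustrative and do not come from the record.

from pathspec import GitIgnoreSpec

# Compile gitignore-style lines; pattern_factory defaults to GitWildMatchPattern.
spec = GitIgnoreSpec.from_lines([
    "*.log",           # ignore all .log files...
    "!important.log",  # ...but negate the match for this one
    "build/",          # ignore the build directory and its contents
])

# match_file() is inherited from PathSpec and reports whether a path is ignored.
print(spec.match_file("debug.log"))      # expected: True
print(spec.match_file("important.log"))  # expected: False (negated by the later pattern)
print(spec.match_file("build/out.o"))    # expected: True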
pan-x-c/EE-LLM
tests/unit_tests/transformer/test_spec_customization.py
[ { "identifier": "get_bias_dropout_add", "path": "megatron/core/fusions/fused_bias_dropout.py", "snippet": "def get_bias_dropout_add(training, fused):\n if fused:\n # jit scripting for a nn.module (with dropout) is not\n # triggering the fusion kernel. For now, we use two\n # different nn.functional routines to account for varying\n # dropout semantics during training and inference phases.\n if training:\n return bias_dropout_add_fused_train\n else:\n return bias_dropout_add_fused_inference\n else:\n return bias_dropout_add_unfused(training)" }, { "identifier": "model_parallel_cuda_manual_seed", "path": "megatron/core/tensor_parallel/random.py", "snippet": "def model_parallel_cuda_manual_seed(seed):\n \"\"\"Initialize model parallel cuda seed.\n\n This function should be called after the model parallel is\n initialized. Also, no torch.cuda.manual_seed should be called\n after this function. Basically, this is replacement for that\n function.\n Two set of RNG states are tracked:\n default state: This is for data parallelism and is the same among a\n set of model parallel GPUs but different across\n different model paralle groups. This is used for\n example for dropout in the non-tensor-model-parallel regions.\n tensor-model-parallel state: This state is different among a set of model\n parallel GPUs, but the same across data parallel\n groups. This is used for example for dropout in\n model parallel regions.\n \"\"\"\n # 2718 is just for fun and any POSITIVE value will work.\n offset = seed + 2718\n tensor_model_parallel_seed = offset + get_tensor_model_parallel_rank()\n # Data parallel gets the original seed.\n data_parallel_seed = seed\n\n _CUDA_RNG_STATE_TRACKER.reset()\n # Set the default state.\n torch.cuda.manual_seed(data_parallel_seed)\n # and model parallel state.\n _CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME, tensor_model_parallel_seed)\n\n expert_parallel_seed = (\n seed + 100 * get_expert_model_parallel_rank() + get_tensor_model_parallel_rank()\n )\n _CUDA_RNG_STATE_TRACKER.add(_EXPERT_PARALLEL_RNG_TRACKER_NAME, expert_parallel_seed)" }, { "identifier": "SelfAttention", "path": "megatron/core/transformer/attention.py", "snippet": "class SelfAttention(Attention):\n \"\"\"Self-attention layer class\n\n Self-attention layer takes input with size [s, b, h]\n and returns output of the same size.\n \"\"\"\n\n def __init__(\n self,\n config: TransformerConfig,\n submodules: SelfAttentionSubmodules,\n layer_number: int = 1,\n attn_mask_type=AttnMaskType.padding,\n **kwargs,\n ):\n super().__init__(\n config=config,\n submodules=submodules,\n layer_number=layer_number,\n attn_mask_type=attn_mask_type,\n **kwargs,\n )\n\n self.linear_qkv = build_module(\n submodules.linear_qkv,\n self.config.hidden_size,\n self.query_projection_size + 2 * self.kv_projection_size,\n config=self.config,\n init_method=self.config.init_method,\n bias=self.config.add_bias_linear,\n skip_bias_add=False,\n )\n\n def get_query_key_value_tensors(self, hidden_states, key_value_states=None):\n \"\"\"\n Derives `query`, `key` and `value` tensors from `hidden_states`.\n \"\"\"\n # Attention heads [sq, b, h] --> [sq, b, ng * (np/ng + 2) * hn)]\n mixed_qkv, _ = self.linear_qkv(hidden_states)\n\n # [sq, b, hp] --> [sq, b, ng, (np/ng + 2) * hn]\n new_tensor_shape = mixed_qkv.size()[:-1] + (\n self.num_query_groups_per_partition,\n (\n (self.num_attention_heads_per_partition // self.num_query_groups_per_partition + 2)\n * self.hidden_size_per_attention_head\n ),\n )\n mixed_qkv = 
mixed_qkv.view(*new_tensor_shape)\n\n # [sq, b, ng, (np/ng + 2) * hn] --> [sq, b, ng, np/ng * hn], [sq, b, ng, hn], [sq, b, ng, hn]\n (query, key, value) = torch.split(\n mixed_qkv,\n [\n (\n self.num_attention_heads_per_partition\n // self.num_query_groups_per_partition\n * self.hidden_size_per_attention_head\n ),\n self.hidden_size_per_attention_head,\n self.hidden_size_per_attention_head,\n ],\n dim=3,\n )\n # [sq, b, ng, np/ng * hn] -> [sq, b, np, hn]\n query = query.reshape(query.size(0), query.size(1), -1, self.hidden_size_per_attention_head)\n\n return query, key, value" }, { "identifier": "SelfAttentionSubmodules", "path": "megatron/core/transformer/attention.py", "snippet": "class SelfAttentionSubmodules:\n linear_qkv: Union[ModuleSpec, type] = None\n dot_product_attention: Union[ModuleSpec, type] = None\n linear_proj: Union[ModuleSpec, type] = None" }, { "identifier": "TEDotProductAttention", "path": "megatron/core/transformer/custom_layers/transformer_engine.py", "snippet": "class TEDotProductAttention(te.pytorch.DotProductAttention):\n \"\"\"\n Wrapper for the Transformer-Engine's `DotProductAttention` layer that also\n has \"flash attention\" enabled.\n\n Note that if Megatron's parallel_state has not been initialized yet, the\n tp_group and cp_group passed to TE will be None and must be set later\n via set_tensor_parallel_group() and set_context_parallel_group().\n \"\"\"\n\n cp_stream: torch.cuda.Stream = None\n\n def __init__(\n self,\n config: TransformerConfig,\n layer_number: int = 1,\n attn_mask_type: AttnMaskType = AttnMaskType.padding,\n **kwargs\n ):\n self.config = config\n\n # Only Transformer-Engine version > 0.13.0 supports context parallelism\n te_version = packaging.version.Version(version(\"transformer-engine\"))\n if te_version > packaging.version.Version(\"0.13.0\"):\n if getattr(TEDotProductAttention, \"cp_stream\") is None:\n TEDotProductAttention.cp_stream = torch.cuda.Stream()\n kwargs[\"cp_group\"] = get_context_parallel_group(check_initialized=False)\n kwargs[\"cp_global_ranks\"] = get_context_parallel_global_ranks(check_initialized=False)\n kwargs[\"cp_stream\"] = TEDotProductAttention.cp_stream\n else:\n assert (\n self.config.context_parallel_size == 1\n ), \"Only Transformer-Engine version > 0.13.0 supports context parallelism\"\n\n super().__init__(\n num_attention_heads=self.config.num_attention_heads,\n kv_channels=self.config.kv_channels,\n attention_dropout=self.config.attention_dropout,\n layer_number=layer_number,\n attn_mask_type=attn_mask_type.name,\n sequence_parallel=self.config.sequence_parallel,\n tp_size=self.config.tensor_model_parallel_size,\n get_rng_state_tracker=get_cuda_rng_tracker,\n tp_group=get_tensor_model_parallel_group(check_initialized=False),\n **kwargs,\n )" }, { "identifier": "TELayerNormColumnParallelLinear", "path": "megatron/core/transformer/custom_layers/transformer_engine.py", "snippet": "class TELayerNormColumnParallelLinear(te.pytorch.LayerNormLinear):\n \"\"\"\n Wrapper for the Transformer-Engine's `LayerNormLinear` layer that combines\n layernorm and linear layers\n \"\"\"\n\n def __init__(\n self,\n input_size: int,\n output_size: int,\n config: TransformerConfig,\n init_method: Callable,\n bias: bool,\n skip_bias_add: bool,\n **kwargs\n ):\n self.config = config\n # TE returns a zero length Tensor when bias=False and\n # return_bias=True, but we prefer None. So in that case we\n # tell TE to not return the bias, and return None\n # ourselves. 
This way our forward always returns two values\n # and we don't have to deal with the zero length Tensor.\n self.te_return_bias = skip_bias_add and bias\n\n # Only Transformer-Engine version >= 0.11.0 supports `RMSNorm`\n te_version = packaging.version.Version(version(\"transformer-engine\"))\n if te_version >= packaging.version.Version(\"0.11.0\"):\n kwargs[\"normalization\"] = self.config.normalization\n\n super().__init__(\n in_features=input_size,\n out_features=output_size,\n bias=bias,\n sequence_parallel=self.config.sequence_parallel,\n fuse_wgrad_accumulation=self.config.gradient_accumulation_fusion,\n tp_group=get_tensor_model_parallel_group(check_initialized=False),\n tp_size=self.config.tensor_model_parallel_size,\n get_rng_state_tracker=get_cuda_rng_tracker,\n init_method=init_method,\n params_dtype=self.config.params_dtype,\n parallel_mode=\"column\",\n return_bias=self.te_return_bias,\n **_get_extra_te_kwargs(config),\n )\n\n def forward(self, x):\n out = super().forward(x)\n\n # TE only returns a tuple when return_bias is True, otherwise\n # it returns a single Tensor, we always want to return two\n # values regardless of the arguments.\n if self.te_return_bias:\n return out\n return out, None" }, { "identifier": "TENorm", "path": "megatron/core/transformer/custom_layers/transformer_engine.py", "snippet": "class TENorm:\n \"\"\"\n A conditional wrapper to initialize an instance of Transformer-Engine's\n `LayerNorm` or `RMSNorm` based on input\n \"\"\"\n\n def __new__(\n cls,\n config: TransformerConfig,\n hidden_size: int,\n eps: float = 1e-5,\n sequence_parallel: bool = False,\n normalization: str = \"LayerNorm\",\n **kwargs\n ):\n if normalization == \"LayerNorm\":\n instance = te.pytorch.LayerNorm(\n hidden_size=hidden_size,\n eps=eps,\n sequence_parallel=sequence_parallel,\n **_get_extra_te_kwargs(config),\n )\n elif normalization == \"RMSNorm\":\n assert hasattr(\n te.pytorch, \"RMSNorm\"\n ), \"Transformer-Engine >= v0.11 required to use this feature\"\n instance = te.pytorch.RMSNorm(\n hidden_size=hidden_size,\n eps=eps,\n sequence_parallel=sequence_parallel,\n **_get_extra_te_kwargs(config),\n )\n else:\n raise Exception('Only LayerNorm and RMSNorm are curently supported')\n\n return instance" }, { "identifier": "TERowParallelLinear", "path": "megatron/core/transformer/custom_layers/transformer_engine.py", "snippet": "class TERowParallelLinear(TELinear):\n \"\"\"\n Wrapper for the Transformer-Engine's `Linear` layer but specialized similar\n to megatron's `RowParallelLinear` layer.\n \"\"\"\n\n def __init__(self, input_size: int, output_size: int, config: TransformerConfig, **kwargs):\n self.config = config\n super().__init__(\n input_size=input_size,\n output_size=output_size,\n config=self.config,\n parallel_mode=\"row\",\n **kwargs,\n )" }, { "identifier": "AttnMaskType", "path": "megatron/core/transformer/enums.py", "snippet": "class AttnMaskType(enum.Enum):\n padding = 1\n causal = 2" }, { "identifier": "IdentityFuncOp", "path": "megatron/core/transformer/identity_op.py", "snippet": "class IdentityFuncOp(IdentityOp):\n \"\"\"\n This is a placeholder for IdentityFuncOp(...)(x) -> IdentityOp(x) -> x.\n Such a func is handy for ops like `bias_dropout_fusion` which themselves\n return a function at runtime based on passed arguments\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__()\n\n def forward(self, *args, **kwargs):\n return super().forward" }, { "identifier": "IdentityOp", "path": "megatron/core/transformer/identity_op.py", "snippet": 
"class IdentityOp(torch.nn.Module):\n \"\"\"\n This is a placeholder for IdentityOp(x) -> x\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__()\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "ModuleSpec", "path": "megatron/core/transformer/spec_utils.py", "snippet": "class ModuleSpec:\n \"\"\"This is a Module Specification dataclass.\n\n Specification defines the location of the module (to import dynamically)\n or the imported module itself. It also defines the params that need to be\n passed to initialize the module.\n\n Args:\n module (Union[Tuple, type]): A tuple describing the location of the\n module class e.g. `(module.location, ModuleClass)` or the imported\n module class itself e.g. `ModuleClass` (which is already imported\n using `from module.location import ModuleClass`).\n params (dict): A dictionary of params that need to be passed while init.\n\n \"\"\"\n\n module: Union[Tuple, type]\n params: dict = field(default_factory=lambda: {})\n submodules: type = None" }, { "identifier": "build_module", "path": "megatron/core/transformer/spec_utils.py", "snippet": "def build_module(spec_or_module: Union[ModuleSpec, type], *args, **kwargs):\n # If the passed `spec_or_module` is\n # a `Function`, then return it as it is\n # NOTE: to support an already initialized module add the following condition\n # `or isinstance(spec_or_module, torch.nn.Module)` to the following if check\n if isinstance(spec_or_module, types.FunctionType):\n return spec_or_module\n\n # If the passed `spec_or_module` is actually a spec (instance of\n # `ModuleSpec`) and it specifies a `Function` using its `module`\n # field, return the `Function` as it is\n if isinstance(spec_or_module, ModuleSpec) and isinstance(\n spec_or_module.module, types.FunctionType\n ):\n return spec_or_module.module\n\n # Check if a module class is provided as a spec or if the module path\n # itself is a class\n if isinstance(spec_or_module, type):\n module = spec_or_module\n elif hasattr(spec_or_module, \"module\") and isinstance(spec_or_module.module, type):\n module = spec_or_module.module\n else:\n # Otherwise, dynamically import the module from the module path\n module = import_module(spec_or_module.module)\n\n # If the imported module is actually a `Function` return it as it is\n if isinstance(module, types.FunctionType):\n return module\n\n # Finally return the initialized module with params from the spec as well\n # as those passed as **kwargs from the code\n\n # Add the `submodules` argument to the module init call if it exists in the\n # spec.\n if hasattr(spec_or_module, \"submodules\") and spec_or_module.submodules is not None:\n kwargs[\"submodules\"] = spec_or_module.submodules\n\n return module(\n *args, **spec_or_module.params if hasattr(spec_or_module, \"params\") else {}, **kwargs\n )" }, { "identifier": "import_module", "path": "megatron/core/transformer/spec_utils.py", "snippet": "def import_module(module_path: Tuple[str]):\n \"\"\"Import a named object from a module in the context of this function.\n\n TODO: make this importer module more robust, at least make sure there\n are no side effects of using this as is\n \"\"\"\n base_path, name = module_path\n try:\n module = __import__(base_path, globals(), locals(), [name])\n except ImportError as e:\n print(f\"couldn't import module due to {e}\")\n return None\n return vars(module)[name]" }, { "identifier": "TransformerConfig", "path": "megatron/core/transformer/transformer_config.py", "snippet": "class 
TransformerConfig(ModelParallelConfig):\n \"\"\"Configuration object for megatron-core transformers.\n\n Attributes:\n\n # model architecture\n num_layers (int): Number of transformer layers in a transformer block.\n hidden_size (int): Transformer hidden size.\n ffn_hidden_size (int): Transformer Feed-Forward Network hidden size.\n This is set to 4*hidden_size if not provided. Defaults to None.')\n num_attention_heads (int): Number of transformer attention heads.\n kv_channels (int): Projection weights dimension in multi-head attention.\n This is set to hidden_size // num_attention_heads if not provided.\n Defaults to None.\n num_query_groups (int): Number of query groups for group query attention. If None, normal attention is used.\n\n hidden_dropout (float): Dropout probability for transformer hidden state. Defaults to 0.1.\n attention_dropout (float): Post attention dropout probability. Defaults to 0.1.\n fp32_residual_connection (bool): If true, move residual connections to fp32.\n apply_residual_connection_post_layernorm (bool): If true, uses the original BERT residule connection ordering.\n Defaults to False.\n layernorm_epsilon (float): Layernorm epsilon. Defaults to 1e-5.\n\n layernorm_zero_centered_gamma (bool): if set to 'True', the LayerNorm is adjusted to center the gamma values\n around 0. This improves numerical stability. Defaults to False.\n\n add_bias_linear (bool): Include a bias term in all linear layers (QKV projections, after core attention, and two\n in MLP layer). Default is True.\n\n gated_linear_unit (bool): Use a gated linear unit for the first linear layer in the MLP. Defaults to False.\n\n activation_func (Callable): Activation function to use for the non-linearity in the MLP. Defaults to F.gelu.\n\n num_moe_experts (int): Number of experts to use for Mixture of Experts. \n When set, it replaces MLP with Switch MLP. Defaults to None (no MoE).\n\n # initialization\n init_method (Callable): Method to initialize weights. Note that bias is always set to\n zero. Should be a function that takes a single Tensor and\n initializes it. Defaults to\n megatron.core.utils.init_method_normal(init_method_std) which is\n torch.nn.init.normal_ with mean=0.0 and std=init_method_Std.\n\n output_layer_init_method (Callable): Method to initialize weights of the output layer of\n both attention and MLP blocks. Defaults to\n megatron.core.utils.scaled_init_method_normal(init_method_std)\n which is torch.nn.init.normal_ with mean=0.0 and\n std=init_method_std / math.sqrt(2.0 * num_layers).\n\n init_method_std (float): Standard deviation of the zero mean normal for the default\n initialization method, not used if init_method and\n output_layer_init_method are provided. Defaults to 0.02.\n\n # mixed-precision\n apply_query_key_layer_scaling (bool): If true, scale Q * K^T by 1 / layer-number. Defaults to True.\n attention_softmax_in_fp32 (bool): If true, run attention masking and softmax in fp32.\n This should be true if apply_query_key_layer_scaling is true.\n\n # fusion\n bias_gelu_fustion (bool): If true, fuses bias and gelu. 
Defaults to False.\n masked_softmax_fusion (bool): If true, uses softmax fusion.\n persist_layer_norm (bool): If true, uses the persistent fused layer norm kernel.\n This kernel only supports a fixed set of hidden sizes.\n Defaults to False.\n bias_dropout_fusion (bool): If true, uses bias dropout fusion.\n\n # activation recomputation\n\n recompute_granularity (str): megatron-core supports 'selective' activation checkpointing where only the memory\n intensive part of attention is checkpointed. These memory intensive activations\n are also less compute intensive which makes activation checkpointing more efficient\n for LLMs (20B+). See Reducing Activation Recomputation in Large Transformer\n Models: https://arxiv.org/abs/2205.05198 for more details. 'full' will checkpoint\n the entire transformer layer. Must be 'selective' or 'full'. 'selective' always uses all layers.\n Defaults to None.\n\n recompute_method (str): uniform will uniformly divide the total number of transformer layers in a transformer\n block and recompute the input activation of each divided chunk at the specified\n granularity. block will recompute the input activations for only a set number of\n transformer layers per pipeline stage. The rest of the layers in the pipeline stage\n will not have any activations recomputed. Must be 'uniform' or 'block'. Defaults to\n None.\n\n recompute_num_layers (int): When recompute_method is uniform, recompute_num_layers is the number of transformer\n layers in each uniformly divided recompute unit. When recompute_method is block,\n recompute_num_layers is the number of transformer layers to recompute within each\n pipeline stage. Must be None for 'selective' activation checkpointing. Defaults to None.\n\n distribute_saved_activations (bool): If true, distribute recomputed activations across the model parallel\n group. Defaults to None.\n\n # fp8 related (via Transformer Engine). For detailed info, refer the the Transformer Engine docs at\n # https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html\n\n fp8 (str): If set, enables the use of FP8 precision through Transformer Engine. There are 2 predefined choices: (1) 'e4m3'\n uniformly uses e4m3 for all FP8 tensors, (2) 'hybrid' uses e4m3 for all FP8 activation and weight tensors and\n e5m2 for all FP8 output activation gradient tensors. Defaults to None.\n\n fp8_margin (int): Margin for the scaling factor computation.\n\n fp8_interval (int): Controls how often the scaling factor is recomputed.\n\n fp8_amax_history_len (int): The length of the amax history window used for scaling factor computation.\n\n fp8_amax_compute_algo (str): Algorithm used for choosing the `amax` value for the scaling factor computation.\n There are 2 predefined choices: `max` chooses the largest `amax` in the history\n window, while `most_recent` always chooses the most recently seen value.\n\n fp8_wgrad (bool): When set to False, override FP8 config options and do the wgrad computation in higher precision.\n Defaults to True.\n\n # Experimental\n normalization (str): Swtich b/w `LayerNorm` and `RMSNorm` as normalization layers. For now, these are primarily\n used by Transformer-Engine's layers like `LayerNormLinear`. 
Default value is `LayerNorm`.\n\n\n \"\"\"\n\n # model architecture\n num_layers: int = 0\n hidden_size: int = 0\n num_attention_heads: int = 0\n num_query_groups: int = None\n\n ffn_hidden_size: int = None\n kv_channels: int = None\n hidden_dropout: float = 0.1\n attention_dropout: float = 0.1\n fp32_residual_connection: bool = False\n # @jcasper should we keep this option?\n apply_residual_connection_post_layernorm: bool = False\n layernorm_epsilon: float = 1e-5\n layernorm_zero_centered_gamma: bool = False\n add_bias_linear: bool = True\n gated_linear_unit: bool = False\n activation_func: Callable = F.gelu\n num_moe_experts: int = None\n\n # initialization\n init_method: Callable = None\n output_layer_init_method: Callable = None\n init_method_std: float = 0.02\n\n # mixed-precision\n apply_query_key_layer_scaling: bool = True\n attention_softmax_in_fp32: bool = True\n\n # communication\n\n # fusion\n bias_gelu_fusion: bool = False # TODO: this should be bias_activation_fusion ?\n masked_softmax_fusion: bool = False\n persist_layer_norm: bool = False\n bias_dropout_fusion: bool = False # TODO: this should be bias_dropout_add_fusion?\n\n # activation recomputation\n recompute_granularity: str = None\n recompute_method: str = None\n recompute_num_layers: int = None\n distribute_saved_activations: bool = None\n\n # fp8 related\n fp8: str = None\n fp8_margin: int = 0\n fp8_interval: int = 1\n fp8_amax_history_len: int = 1\n fp8_amax_compute_algo: str = \"most_recent\"\n fp8_wgrad: bool = True\n\n # experimental section (TODO: move to apt. section above once stable)\n normalization: bool = \"LayerNorm\" # alt value supported by TE: \"RMSNorm\"\n\n def __post_init__(self):\n \"\"\" Python dataclass method that is used to modify attributes after initialization.\n See https://docs.python.org/3/library/dataclasses.html#post-init-processing for more details.\n \"\"\"\n super().__post_init__()\n if self.fp16 and self.bf16:\n raise ValueError(\n f'Only one of self.fp16: {self.fp16} and self.bf16 {self.bf16} should be True.'\n )\n\n if self.num_attention_heads % self.tensor_model_parallel_size != 0:\n raise ValueError(\n f\"num_attention_heads ({self.num_attention_heads}) must be a multiple of \"\n f\"tensor_model_parallel_size ({self.tensor_model_parallel_size}).\"\n )\n\n if self.ffn_hidden_size is None:\n self.ffn_hidden_size = 4 * self.hidden_size\n\n if self.kv_channels is None:\n self.kv_channels = self.hidden_size // self.num_attention_heads\n\n if self.num_query_groups is None:\n self.num_query_groups = self.num_attention_heads\n\n if self.num_query_groups % self.tensor_model_parallel_size != 0:\n raise ValueError(\n f\"num_query_groups ({self.num_query_groups}) must be a multiple of \"\n f\"tensor_model_parallel_size ({self.tensor_model_parallel_size}).\"\n )\n\n if self.apply_query_key_layer_scaling:\n self.attention_softmax_in_fp32 = True\n\n if self.expert_model_parallel_size > 1 and self.num_moe_experts is None:\n raise ValueError(f'num_moe_experts must be non None to use expert-parallel.')\n\n if self.recompute_granularity is not None:\n if not self.recompute_granularity in ['full', 'selective']:\n raise ValueError(\n f'When using recompute_granuarlity: {self.recompute_granularity} must be \"full\" or \"selective\".'\n )\n\n if self.recompute_method is not None:\n if not self.recompute_method in ['block', 'uniform']:\n raise ValueError(\n f'recompute_method: {self.recompute_method} must be \"block\" or \"uniform\".'\n )\n elif self.recompute_granularity != 'selective':\n raise 
ValueError(\n f'Using recompute_granularity: {self.recompute_granularity} so recompute_method must be \"block\" or \"uniform\"'\n )\n\n if self.recompute_granularity != 'selective' and self.recompute_num_layers is None:\n raise ValueError(\n f'When using recompute_granularity: {self.recompute_granularity} recompute_num_layers must be between '\n f'1 and num_layers_per_pipeline_rank: {self.num_layers // self.pipeline_model_parallel_size}'\n )\n elif (\n self.recompute_granularity == 'selective' and self.recompute_num_layers is not None\n ):\n raise ValueError(\n f'When using recompute_granularity: {self.recompute_granularity} recompute_num_layers must be None.'\n )\n\n if self.distribute_saved_activations and self.sequence_parallel:\n raise ValueError(\n f'distribute_saved_activations: {self.distribute_saved_activations} must be false when sequence parallel is enabled: {self.sequence_parallel}'\n )\n\n if self.virtual_pipeline_model_parallel_size is not None:\n if not self.num_layers % self.virtual_pipeline_model_parallel_size == 0:\n raise ValueError(\n f'num_layers: {self.num_layers} must be divisible by virtual_model_parallel_size {self.virtual_pipeline_model_parallel_size}'\n )\n\n if self.apply_query_key_layer_scaling:\n self.attention_softmax_in_fp32 = True\n\n if self.bias_gelu_fusion:\n if not self.add_bias_linear:\n raise ValueError(\n \"When bias_gelu_fusion is True, add_bias_linear must also be True.\"\n )\n\n if self.activation_func != F.gelu:\n raise ValueError(f'When bias_gelu_fusion is True, activation_func must be F.gelu.')\n\n if self.init_method is None:\n self.init_method = init_method_normal(self.init_method_std)\n\n if self.output_layer_init_method is None:\n self.output_layer_init_method = scaled_init_method_normal(\n self.init_method_std, self.num_layers\n )" }, { "identifier": "TransformerLayerSubmodules", "path": "megatron/core/transformer/transformer_layer.py", "snippet": "class TransformerLayerSubmodules:\n input_layernorm: Union[ModuleSpec, type] = IdentityOp\n self_attention: Union[ModuleSpec, type] = IdentityOp\n self_attn_bda: Union[ModuleSpec, type] = IdentityFuncOp\n\n pre_cross_attn_layernorm: Union[ModuleSpec, type] = IdentityOp\n cross_attention: Union[ModuleSpec, type] = IdentityOp\n cross_attn_bda: Union[ModuleSpec, type] = IdentityFuncOp\n\n pre_mlp_layernorm: Union[ModuleSpec, type] = IdentityOp\n mlp: Union[ModuleSpec, type] = IdentityOp\n mlp_bda: Union[ModuleSpec, type] = IdentityFuncOp" }, { "identifier": "Utils", "path": "tests/unit_tests/test_utilities.py", "snippet": "class Utils:\n\n world_size = torch.cuda.device_count()\n rank = int(os.environ['LOCAL_RANK'])\n\n @staticmethod\n def initialize_distributed():\n print(f'Initializing torch.distributed with rank: {Utils.rank}, world_size: {Utils.world_size}')\n torch.cuda.set_device(Utils.rank % torch.cuda.device_count())\n init_method = 'tcp://'\n master_ip = os.getenv('MASTER_ADDR', 'localhost')\n master_port = os.getenv('MASTER_PORT', '6000')\n init_method += master_ip + ':' + master_port\n torch.distributed.init_process_group(backend='nccl', world_size=Utils.world_size, rank=Utils.rank, init_method=init_method)\n \n @staticmethod\n def destroy_model_parallel():\n ps.destroy_model_parallel()\n torch.distributed.barrier()\n\n @staticmethod\n def initialize_model_parallel(tensor_model_parallel_size = 1, pipeline_model_parallel_size = 1, virtual_pipeline_model_parallel_size = None, pipeline_model_parallel_split_rank = None):\n ps.destroy_model_parallel()\n if not 
torch.distributed.is_initialized():\n Utils.initialize_distributed()\n ps.initialize_model_parallel(tensor_model_parallel_size, pipeline_model_parallel_size, virtual_pipeline_model_parallel_size, pipeline_model_parallel_split_rank)" } ]
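The `__post_init__` logic quoted above derives several defaults from the values that are provided. A small sketch of what that implies for the configuration constructed in this record's test code (assumes megatron-core is importable; the printed values follow directly from the quoted code rather than from running the test):

from megatron.core.transformer.transformer_config import TransformerConfig

# Same constructor call as in the test's setup_method below.
config = TransformerConfig(
    num_layers=2, hidden_size=12, num_attention_heads=4, use_cpu_initialization=True
)

# Derived in __post_init__ when the corresponding arguments are left as None:
print(config.ffn_hidden_size)   # 48 -> 4 * hidden_size
print(config.kv_channels)       # 3  -> hidden_size // num_attention_heads
print(config.num_query_groups)  # 4  -> defaults to num_attention_heads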
from dataclasses import dataclass, fields

from megatron.core.fusions.fused_bias_dropout import get_bias_dropout_add
from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed
from megatron.core.transformer.attention import SelfAttention, SelfAttentionSubmodules
from megatron.core.transformer.custom_layers.transformer_engine import (
    TEDotProductAttention,
    TELayerNormColumnParallelLinear,
    TENorm,
    TERowParallelLinear,
)
from megatron.core.transformer.enums import AttnMaskType
from megatron.core.transformer.identity_op import IdentityFuncOp, IdentityOp
from megatron.core.transformer.spec_utils import ModuleSpec, build_module, import_module
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.transformer.transformer_layer import TransformerLayerSubmodules
from tests.unit_tests.test_utilities import Utils

import pytest
import torch
import transformer_engine as te
7,730
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.


class TestSpecCustomization:
    def setup_method(self, method):
        Utils.initialize_model_parallel(1, 1)
        model_parallel_cuda_manual_seed(123)

        self.config = TransformerConfig(
            num_layers=2, hidden_size=12, num_attention_heads=4, use_cpu_initialization=True
        )

        # specify Transformer Layer spec with all identity ops
        self.transformer_layer_spec = TransformerLayerSubmodules()

        # specify attention spec using already imported class
        self.attention_spec = ModuleSpec(
            module=SelfAttention,
            params={"attn_mask_type": AttnMaskType.causal},
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.


class TestSpecCustomization:
    def setup_method(self, method):
        Utils.initialize_model_parallel(1, 1)
        model_parallel_cuda_manual_seed(123)

        self.config = TransformerConfig(
            num_layers=2, hidden_size=12, num_attention_heads=4, use_cpu_initialization=True
        )

        # specify Transformer Layer spec with all identity ops
        self.transformer_layer_spec = TransformerLayerSubmodules()

        # specify attention spec using already imported class
        self.attention_spec = ModuleSpec(
            module=SelfAttention,
            params={"attn_mask_type": AttnMaskType.causal},
submodules=SelfAttentionSubmodules(
3
2023-12-07 08:29:38+00:00
12k
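The `ModuleSpec`/`build_module` pair quoted in this record is what drives layer construction in the test above. A minimal sketch of the pattern, assuming megatron-core and torch are installed; `torch.nn.LayerNorm` is a stand-in module chosen for illustration and is not one of the modules used by the test:

import torch

from megatron.core.transformer.spec_utils import ModuleSpec, build_module

# A spec can wrap an already-imported class together with its constructor params...
norm_spec = ModuleSpec(module=torch.nn.LayerNorm, params={"eps": 1e-5})

# ...and build_module instantiates it, merging the spec params with extra kwargs.
norm = build_module(norm_spec, normalized_shape=12)
print(norm)  # e.g. LayerNorm((12,), eps=1e-05, elementwise_affine=True)

# A spec may instead name the module by dotpath; build_module resolves it via import_module.
attn_spec = ModuleSpec(module=("megatron.core.transformer.attention", "SelfAttention"))

Naming the module by dotpath keeps heavyweight imports lazy until the layer is actually built, which is why the specs in the test can be declared before any model is constructed.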
mitrefireline/simharness
main.py
[ { "identifier": "RenderEnv", "path": "simharness2/callbacks/render_env.py", "snippet": "class RenderEnv(DefaultCallbacks):\n \"\"\"To use this callback, set {\"callbacks\": RenderEnv} in the algo config.\"\"\"\n\n def on_algorithm_init(\n self,\n *,\n algorithm: \"Algorithm\",\n **kwargs,\n ) -> None:\n \"\"\"Callback run when a new algorithm instance has finished setup.\n\n This method gets called at the end of Algorithm.setup() after all\n the initialization is done, and before actually training starts.\n\n Args:\n algorithm: Reference to the Algorithm instance.\n kwargs: Forward compatibility placeholder.\n \"\"\"\n logdir = algorithm.logdir\n # TODO: Handle edge case where num_evaluation_workers == 0.\n # Make the trial result path accessible to each env (for gif saving).\n algorithm.evaluation_workers.foreach_worker(\n lambda w: w.foreach_env(lambda env: setattr(env, \"trial_logdir\", logdir)),\n local_worker=False,\n )\n\n def on_episode_created(\n self,\n *,\n worker: \"RolloutWorker\",\n base_env: BaseEnv,\n # policies: Dict[PolicyID, Policy],\n # episode: Union[Episode, EpisodeV2],\n env_index: Optional[int] = None,\n **kwargs,\n ) -> None:\n \"\"\"Callback run right after an Episode has started.\n\n This method gets called after the Episode(V2)'s respective sub-environment's\n (usually a gym.Env) `reset()` is called by RLlib.\n\n 1) Episode(V2) created: Triggers callback `on_episode_created`.\n 2) Respective sub-environment (gym.Env) is `reset()`.\n 3) Episode(V2) starts: This callback fires.\n 4) Stepping through sub-environment/episode commences.\n\n Args:\n worker: Reference to the current rollout worker.\n base_env: BaseEnv running the episode. The underlying\n sub environment objects can be retrieved by calling\n `base_env.get_sub_environments()`.\n policies: Mapping of policy id to policy objects. In single\n agent mode there will only be a single \"default\" policy.\n episode: Episode object which contains the episode's\n state. You can use the `episode.user_data` dict to store\n temporary data, and `episode.custom_metrics` to store custom\n metrics for the episode.\n env_index: The index of the sub-environment that started the episode\n (within the vector of sub-environments of the BaseEnv).\n kwargs: Forward compatibility placeholder.\n \"\"\"\n env: ReactiveHarness[FireSimulation] = base_env.get_sub_environments()[env_index]\n\n if worker.config.in_evaluation:\n logger.info(\"Creating evaluation episode...\")\n # Ensure the evaluation env is rendering mode, if it should be.\n if env._should_render and not env.sim.rendering:\n logger.info(\"Enabling rendering for evaluation env.\")\n # TODO: Refactor below 3 lines into `env.render()` method?\n os.environ[\"SDL_VIDEODRIVER\"] = \"dummy\"\n base_env.get_sub_environments()[env_index].sim.reset()\n base_env.get_sub_environments()[env_index].sim.rendering = True\n elif not env._should_render and env.sim.rendering:\n logger.error(\n \"Simulation is in rendering mode, but `env._should_render` is False.\"\n )\n\n def on_episode_end(\n self,\n *,\n worker: \"RolloutWorker\",\n base_env: BaseEnv,\n policies: Dict[PolicyID, Policy],\n episode: Union[Episode, EpisodeV2, Exception],\n env_index: Optional[int] = None,\n **kwargs,\n ) -> None:\n \"\"\"Runs when an episode is done.\n\n Args:\n worker: Reference to the current rollout worker.\n base_env: BaseEnv running the episode. 
The underlying\n sub environment objects can be retrieved by calling\n `base_env.get_sub_environments()`.\n policies: Mapping of policy id to policy\n objects. In single agent mode there will only be a single\n \"default_policy\".\n episode: Episode object which contains episode\n state. You can use the `episode.user_data` dict to store\n temporary data, and `episode.custom_metrics` to store custom\n metrics for the episode.\n In case of environment failures, episode may also be an Exception\n that gets thrown from the environment before the episode finishes.\n Users of this callback may then handle these error cases properly\n with their custom logics.\n env_index: The index of the sub-environment that ended the episode\n (within the vector of sub-environments of the BaseEnv).\n kwargs: Forward compatibility placeholder.\n \"\"\"\n env: ReactiveHarness[FireSimulation] = base_env.get_sub_environments()[env_index]\n # Save a GIF from the last episode\n # TODO: Do we also want to save the fire spread graph?\n if worker.config.in_evaluation:\n logdir = env.trial_logdir\n eval_iters = env._num_eval_iters\n # Check if there is a gif \"ready\" to be saved\n if env._should_render and env.sim.rendering:\n # FIXME Update logic to handle saving same gif when writing to Aim UI\n gif_save_path = os.path.join(\n logdir, \"gifs\", f\"eval_iter_{eval_iters}.gif\"\n )\n # FIXME: Can we save each gif in a folder that relates it to episode iter?\n logger.info(f\"Saving GIF to {gif_save_path}...\")\n base_env.get_sub_environments()[env_index].sim.save_gif(gif_save_path)\n # Save the gif_path so that we can write image to aim server, if desired\n # NOTE: `save_path` is a list after the above; do element access for now\n logger.debug(f\"Type of gif_save_path: {type(gif_save_path)}\")\n episode.media.update({\"gif\": gif_save_path})\n\n # Try to collect and log episode history, if it was saved.\n if env.harness_analytics.sim_analytics.save_history:\n env.harness_analytics.save_sim_history(logdir, eval_iters)\n\n # sim.save_spread_graph(save_dir)\n\n def on_evaluate_start(\n self,\n *,\n algorithm: \"Algorithm\",\n **kwargs,\n ) -> None:\n \"\"\"Callback before evaluation starts.\n\n This method gets called at the beginning of Algorithm.evaluate().\n\n Args:\n algorithm: Reference to the algorithm instance.\n kwargs: Forward compatibility placeholder.\n \"\"\"\n # TODO: Add note in docs that the local worker IS NOT rendered. 
With this\n # assumption, we should always set `evaluation.evaluation_num_workers >= 1`.\n # TODO: Handle edge case where num_evaluation_workers == 0.\n logger.info(\"Starting evaluation...\")\n # Increment the number of evaluation iterations\n algorithm.evaluation_workers.foreach_worker(\n lambda w: w.foreach_env(lambda env: env._increment_evaluation_iterations()),\n local_worker=False,\n )\n # TODO: Use a function to decide if this round should be rendered (ie log10).\n # TODO: Additionally, log the total number of episodes run so far.\n # Enable the evaluation environment (s) to be rendered.\n algorithm.evaluation_workers.foreach_worker(\n lambda w: w.foreach_env(lambda env: env._configure_env_rendering(True)),\n local_worker=False,\n )\n\n def on_evaluate_end(\n self,\n *,\n algorithm: \"Algorithm\",\n evaluation_metrics: dict,\n **kwargs,\n ) -> None:\n \"\"\"Runs when the evaluation is done.\n\n Runs at the end of Algorithm.evaluate().\n\n Args:\n algorithm: Reference to the algorithm instance.\n evaluation_metrics: Results dict to be returned from algorithm.evaluate().\n You can mutate this object to add additional metrics.\n kwargs: Forward compatibility placeholder.\n \"\"\"\n # TODO: Add note in docs that the local worker IS NOT rendered. With this\n # assumption, we should always set `evaluation.evaluation_num_workers >= 1`.\n # TODO: Handle edge case where num_evaluation_workers == 0.\n\n # TODO: Use a function to decide if this round should be rendered (ie log10).\n # Disable the evaluation environment (s) to be rendered.\n algorithm.evaluation_workers.foreach_worker(\n lambda w: w.foreach_env(lambda env: env._configure_env_rendering(False)),\n local_worker=False,\n )" }, { "identifier": "AimLoggerCallback", "path": "simharness2/logger/aim.py", "snippet": "class AimLoggerCallback(LoggerCallback):\n \"\"\"Aim Logger: logs metrics in Aim format.\n\n Aim is an open-source, self-hosted ML experiment tracking tool.\n It's good at tracking lots (thousands) of training runs, and it allows you to\n compare them with a performant and well-designed UI.\n\n Source: https://github.com/aimhubio/aim\n\n Args:\n repo: Aim repository directory or a `Repo` object that the Run object will\n log results to. If not provided, a default repo will be set up in the\n experiment directory (one level above trial directories).\n experiment: Sets the `experiment` property of each Run object, which is the\n experiment name associated with it. Can be used later to query\n runs/sequences.\n If not provided, the default will be the Tune experiment name set\n by `RunConfig(name=...)`.\n metrics: List of metric names (out of the metrics reported by Tune) to\n track in Aim. If no metric are specified, log everything that\n is reported.\n aim_run_kwargs: Additional arguments that will be passed when creating the\n individual `Run` objects for each trial. For the full list of arguments,\n please see the Aim documentation:\n https://aimstack.readthedocs.io/en/latest/refs/sdk.html\n \"\"\"\n\n VALID_HPARAMS = (str, bool, int, float, list, type(None))\n VALID_NP_HPARAMS = (np.bool8, np.float32, np.float64, np.int32, np.int64)\n\n def __init__(\n self,\n repo: Optional[Union[str, \"Repo\"]] = None,\n metrics: Optional[List[str]] = None,\n cfg: Optional[DictConfig] = None,\n **aim_run_kwargs,\n ):\n \"\"\"See help(AimLoggerCallback) for more information about parameters.\"\"\"\n if Run is None:\n raise RuntimeError(\n \"aim must be installed!. 
You can install aim with\"\n \" the command: `pip install aim`.\"\n )\n\n self._repo_path = repo\n if not (bool(metrics) or metrics is None):\n raise ValueError(\n \"`metrics` must either contain at least one metric name, or be None, \"\n \"in which case all reported metrics will be logged to the aim repo.\"\n )\n self._metrics = metrics\n # NOTE: I think a shallow copy is okay here; better to use a copy for safety?\n log_hydra_cfg = aim_run_kwargs.pop(\"log_hydra_config\", None)\n self._cfg = cfg.copy() if log_hydra_cfg else None\n self._aim_run_kwargs = aim_run_kwargs\n self._trial_to_run: Dict[\"Trial\", Run] = {}\n\n def _create_run(self, trial: \"Trial\") -> Run:\n \"\"\"Initializes an Aim Run object for a given trial.\n\n Args:\n trial: The Tune trial that aim will track as a Run.\n\n Returns:\n Run: The created aim run for a specific trial.\n \"\"\"\n experiment_dir = trial.local_experiment_path\n run = Run(\n repo=self._repo_path or experiment_dir,\n **self._aim_run_kwargs,\n )\n # Attach a few useful trial properties\n run[\"trainable_name\"] = trial.trainable_name\n run[\"trial_id\"] = trial.trial_id\n run[\"trial_path\"] = trial.path\n run[\"trial_relative_logdir\"] = trial.relative_logdir\n # Log the (hydra) config if it exists\n if self._cfg:\n self._log_hydra_config(run)\n\n # Make temp file and write the trial params to it\n # tmp_path = os.path.join(trial.logdir, \"expr_param_file.json\")\n # with open(tmp_path, \"w\") as f:\n # json.dump(trial.config, f, indent=2, sort_keys=True, cls=SafeFallbackEncoder)\n # # Load temp file into dictionary, then delete the temp file\n # with open(tmp_path, \"r\") as f:\n # params_dict = json.load(f)\n\n # os.remove(tmp_path)\n\n # # Add params so that they will appear in `Run Params` in the Aim UI\n # for k, v in params_dict.items():\n # # Make sure v is not None, null, nan, etc.\n # if v and not np.isnan(v):\n # run[k] = v\n\n # with initialize(version_base=None, config_path=\"conf\"):\n # cfg = GlobalHydra().config_loader()\n # run[\"hparams\"] = cfg.load_configuration(config_name=\"config\", over)\n\n trial_ip = trial.get_runner_ip()\n if trial_ip:\n run[\"trial_ip\"] = trial_ip\n return run\n\n def log_trial_start(self, trial: \"Trial\"):\n \"\"\"Execute on trial start.\n\n Args:\n trial: The Tune trial that aim will track as a Run.\n \"\"\"\n if trial in self._trial_to_run:\n # Cleanup an existing run if the trial has been restarted\n self._trial_to_run[trial].close()\n\n trial.init_logdir()\n self._trial_to_run[trial] = self._create_run(trial)\n\n if trial.evaluated_params:\n self._log_trial_hparams(trial)\n\n def log_trial_result(self, iteration: int, trial: \"Trial\", result: Dict):\n \"\"\"Log a result.\n\n Args:\n iteration: The iteration number\n trial: The Tune trial that aim will track as a Run.\n result: Dictionary containing key:value information to log\n \"\"\"\n tmp_result = result.copy()\n\n step = result.get(TIMESTEPS_TOTAL, None) or result[TRAINING_ITERATION]\n episode = result.get(EPISODES_TOTAL, None)\n for k in [\"config\", \"pid\", \"timestamp\", TIME_TOTAL_S, TRAINING_ITERATION]:\n tmp_result.pop(k, None) # not useful to log these\n\n # `context` and `epoch` are special keys that users can report,\n # which are treated as special aim metrics/configurations.\n context = tmp_result.pop(\"context\", None)\n epoch = tmp_result.pop(\"epoch\", None)\n\n trial_run = self._trial_to_run[trial]\n path = [\"ray\", \"tune\"]\n\n # gif_path = './trajectory.gif'\n # aim_image = aim.Image(image=gif_path, format='gif')\n # 
aim_run.track(value=aim_image, name=name, step=step_, context=context)\n # NOTE: Gifs can only be saved to Aim UI if they are from evaluation episodes.\n if tmp_result.get(\"evaluation\", None):\n media: Dict[Any, List] = tmp_result[\"evaluation\"].pop(\"episode_media\", None)\n # Ensure that there is episode media to log\n if media and media.get(\"gif_data\", None):\n # Log gif for each episode added to episode media\n for gif_data in media.get(\"gif_data\"):\n # Prepare Aim Image object\n caption = gif_data.get(\"caption\", \"\")\n image_path = gif_data.get(\"path\", None)\n gif_img = Image(image_path, caption=caption)\n\n name = gif_data.get(\"name\", None)\n # Use specified \"step\"; default to total episodes run\n episode = gif_data.get(\"step\", episode)\n context = gif_data.get(\"context\", {})\n trial_run.track(\n gif_img,\n name=name,\n step=episode or step,\n context=context,\n )\n\n flat_result = flatten_dict(tmp_result, delimiter=\"/\")\n valid_result = {}\n\n for attr, value in flat_result.items():\n if self._metrics and attr not in self._metrics:\n continue\n\n full_attr = \"/\".join(path + [attr])\n if isinstance(value, tuple(VALID_SUMMARY_TYPES)) and not (\n np.isnan(value) or np.isinf(value)\n ):\n valid_result[attr] = value\n trial_run.track(\n value=value,\n name=full_attr,\n epoch=epoch,\n step=episode or step,\n context=context,\n )\n elif (isinstance(value, (list, tuple, set)) and len(value) > 0) or (\n isinstance(value, np.ndarray) and value.size > 0\n ):\n valid_result[attr] = value\n\n def log_trial_end(self, trial: \"Trial\", failed: bool = False):\n \"\"\"Execute on trial end.\n\n Args:\n trial: The Tune trial that aim will track as a Run.\n failed: Flag indicating whether or not the trial failed\n \"\"\"\n trial_run = self._trial_to_run.pop(trial)\n trial_run.close()\n\n def _log_trial_hparams(self, trial: \"Trial\"):\n \"\"\"Log Hyperparameters.\n\n Args:\n trial: The Tune trial that aim will track as a Run.\n \"\"\"\n params = flatten_dict(trial.evaluated_params, delimiter=\"/\")\n flat_params = flatten_dict(params)\n\n scrubbed_params = {\n k: v for k, v in flat_params.items() if isinstance(v, self.VALID_HPARAMS)\n }\n\n np_params = {\n k: v.tolist()\n for k, v in flat_params.items()\n if isinstance(v, self.VALID_NP_HPARAMS)\n }\n\n scrubbed_params.update(np_params)\n removed = {\n k: v\n for k, v in flat_params.items()\n if not isinstance(v, self.VALID_HPARAMS + self.VALID_NP_HPARAMS)\n }\n if removed:\n logger.info(\n \"Removed the following hyperparameter values when \" \"logging to aim: %s\",\n str(removed),\n )\n\n run = self._trial_to_run[trial]\n run[\"hparams\"] = scrubbed_params\n\n # def update_config(self, config: Dict):\n # self.config = config\n # config_out = os.path.join(self.logdir, EXPR_PARAM_FILE)\n # with open(config_out, \"w\") as f:\n # json.dump(self.config, f, indent=2, sort_keys=True, cls=SafeFallbackEncoder)\n\n def _log_hydra_config(self, run: Run):\n \"\"\"Log a subset of the hydra config to Aim as `Run Params`.\"\"\"\n for cfg_k, cfg_v in self._cfg.items():\n if cfg_k == \"simulation\":\n self._log_simulation_config(run, cfg_v)\n elif cfg_k == \"environment\":\n self._log_environment_config(run, cfg_v)\n elif cfg_k == \"evaluation\":\n self._log_evaluation_config(run, cfg_v)\n else:\n # Simple case, just log the config key and its contents.\n run[cfg_k] = instantiate(cfg_v)\n continue\n # run[k] = v\n # run[\"cfg\"] = self._cfg\n\n def _log_simulation_config(self, run: Run, cfg: DictConfig):\n \"\"\"Log the simulation config to Aim 
as `Run Params`.\"\"\"\n # NOTE: Both `train` and `eval` configs are logged, even if they are the same. In\n # TODO: In future, log `train` config and only log parameters within `eval`\n # config that differ from the `train` config (to reduce redundancy).\n run[\"simulation\"] = instantiate(cfg)\n # sim_cfg_flat = flatten_dict(instantiate(cfg_v), delimiter=\".\")\n # Log all training simulation parameters, and only log the evaluation\n # simulation parameters that are different from the training ones.\n # train_cfg = OmegaConf.to_container(instantiate(cfg_v[\"train\"]))\n # eval_cfg = OmegaConf.to_container(instantiate(cfg_v[\"eval\"]))\n # train_cfg_flat = flatten_dict(train_cfg, delimiter=\".\")\n # eval_cfg_flat = flatten_dict(eval_cfg, delimiter=\".\")\n\n # for k, tr_v in train_cfg_flat.items():\n # # Check if evaluation simulation has a different value for parameter.\n # eval_v = eval_cfg_flat.get(k, None)\n # if eval_v and tr_v != eval_v:\n # params_dict[\"simulation\"][\"eval\"].update({k: eval_v})\n # # Always log the training simulation parameters.\n # params_dict[\"simulation\"][\"train\"].update({k: tr_v})\n\n # Remove the `eval` dict if it is empty.\n # if not params_dict[\"simulation\"][\"eval\"]:\n # params_dict[\"simulation\"].pop(\"eval\")\n\n # train_set = set(train_cfg_flat.items())\n # eval_set = set(eval_cfg_flat.items())\n\n def _log_environment_config(self, run: Run, cfg: DictConfig):\n \"\"\"Log the environment config to Aim as `Run Params`.\"\"\"\n env_cfg = instantiate(cfg)\n\n if env_cfg.env_config.get(\"sim\"):\n sim_obj = env_cfg.env_config.sim\n # Intention: create a (dotpath) string representation of `sim_obj`.\n if not isinstance(sim_obj, str):\n sim_obj = \".\".join(\n [sim_obj.__class__.__module__, sim_obj.__class__.__name__]\n )\n env_cfg.env_config.sim = sim_obj\n\n # NOTE: If-else here because `benchmark_sim` is an optional argument.\n if env_cfg.env_config.get(\"benchmark_sim\"):\n # If not None, then assume `benchmark_sim` uses SAME class as `sim`.\n env_cfg.env_config.benchmark_sim = sim_obj\n else:\n env_cfg.env_config.benchmark_sim = None\n\n if env_cfg.env_config.get(\"action_space_type\"):\n # Intention: create a (dotpath) string representation of `action_space_type`.\n action_space_type = env_cfg.env_config.action_space_type\n if isinstance(action_space_type, partial):\n action_space_type = action_space_type.func\n if not isinstance(action_space_type, str):\n action_space_type = \".\".join(\n [\n action_space_type.__module__,\n action_space_type.__name__,\n ]\n )\n env_cfg.env_config.action_space_type = action_space_type\n\n run[\"environment\"] = env_cfg\n\n def _log_evaluation_config(self, run: Run, cfg: DictConfig):\n \"\"\"Log the evaluation config to Aim as `Run Params`.\"\"\"\n eval_cfg_settings = instantiate(cfg.evaluation_config)\n\n if eval_cfg_settings.env_config.get(\"sim\"):\n sim_obj = eval_cfg_settings.env_config.sim\n # Intention: create a (dotpath) string representation of `sim_obj`.\n if not isinstance(sim_obj, str):\n sim_obj = \".\".join(\n [sim_obj.__class__.__module__, sim_obj.__class__.__name__]\n )\n eval_cfg_settings.env_config.sim = sim_obj\n\n # NOTE: If-else here because `benchmark_sim` is an optional argument.\n if eval_cfg_settings.env_config.get(\"benchmark_sim\"):\n # If not None, then assume `benchmark_sim` uses SAME class as `sim`.\n eval_cfg_settings.env_config.benchmark_sim = sim_obj\n else:\n eval_cfg_settings.env_config.benchmark_sim = None\n\n if eval_cfg_settings.env_config.get(\"action_space_type\"):\n # 
Intention: create a (dotpath) string representation of `action_space_type`.\n action_space_type = eval_cfg_settings.env_config.action_space_type\n if isinstance(action_space_type, partial):\n action_space_type = action_space_type.func\n if not isinstance(action_space_type, str):\n action_space_type = \".\".join(\n [\n action_space_type.__module__,\n action_space_type.__name__,\n ]\n )\n eval_cfg_settings.env_config.action_space_type = action_space_type\n\n cfg.evaluation_config = eval_cfg_settings\n run[\"evaluation\"] = cfg" } ]
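The two callbacks quoted above hook in at different levels: `RenderEnv` is an RLlib `DefaultCallbacks` subclass attached to the algorithm config, while `AimLoggerCallback` is a Tune `LoggerCallback` attached to the run config. A minimal sketch of the wiring, assuming ray[rllib] and the simharness2 package are importable; the "PPO" trainable and the bare `cfg=None` are placeholders rather than values taken from main.py:

from ray import air
from ray.tune.registry import get_trainable_cls

from simharness2.callbacks.render_env import RenderEnv
from simharness2.logger.aim import AimLoggerCallback

# Algorithm-level hooks (on_episode_created, on_evaluate_start, ...).
algo_cfg = get_trainable_cls("PPO").get_default_config().callbacks(RenderEnv)

# Trial-level logging: one Aim Run is created per Tune trial.
run_config = air.RunConfig(callbacks=[AimLoggerCallback(cfg=None)])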
import logging
import os

import hydra
import numpy as np
import ray
import simharness2.models  # noqa
from importlib import import_module
from typing import Any, Dict, Tuple

from hydra.core.hydra_config import HydraConfig
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf
from ray import air, tune
from ray.rllib.algorithms.algorithm import Algorithm
from ray.rllib.algorithms.algorithm_config import AlgorithmConfig
from ray.tune.logger import pretty_print
from ray.tune.registry import get_trainable_cls, register_env
from ray.tune.result_grid import ResultGrid
from simfire.enums import BurnStatus

from simharness2.callbacks.render_env import RenderEnv
from simharness2.logger.aim import AimLoggerCallback
8,062
for i in range(stop_cond.training_iteration): LOGGER.info(f"Training iteration {i}.") result = algo.train() LOGGER.info(f"{pretty_print(result)}\n") if i % cfg.checkpoint.checkpoint_frequency == 0: ckpt_path = algo.save() log_str = f"A checkpoint has been created inside directory: {ckpt_path}.\n" LOGGER.info(log_str) if ( result["timesteps_total"] >= stop_cond.timesteps_total or result["episode_reward_mean"] >= stop_cond.episode_reward_mean ): LOGGER.warning(f"Training stopped short at iteration {i}.\n") ts = result["timesteps_total"] mean_rew = result["episode_reward_mean"] LOGGER.info(f"Timesteps: {ts}\nEpisode_Mean_Rewards: {mean_rew}\n") break model_path = algo.save() LOGGER.info(f"The final model has been saved inside directory: {model_path}.") algo.stop() def _instantiate_config( cfg: DictConfig, ) -> Tuple[Dict[str, Any], Dict[str, Any], Dict[str, Any], Dict[str, Any]]: """Instantiate the algorithm config used to build the RLlib training algorithm. Args: cfg (DictConfig): Hydra config with all required parameters. Returns: Tuple[Dict[str, Any], Dict[str, Any], Dict[str, Any], Dict[str, Any]]: env_settings: Parameters needed for instantiating the environment eval_settings: Parameters needed for running the evaluation code. debug_settings: Settings needed for debugging. exploration_cfg: RLlib exploration configurations. """ # Instantiate the env and eval settings objects from the config. # NOTE: We are instantiating to a NEW object on purpose; otherwise a # `TypeError` will be raised when attempting to log the cfg to Aim. env_settings = instantiate(cfg.environment, _convert_="partial") eval_settings = instantiate(cfg.evaluation, _convert_="partial") # FIXME: Fire scenario configuration disabled for now. Fix this in new MR. # Get the operational fires we want to run evaluation with # operational_fires = get_default_operational_fires(cfg) # Inject operational fires into the evaluation settings # eval_settings["evaluation_config"]["env_config"].update( # {"scenarios": operational_fires} # ) # Prepare exploration options for the algorithm exploration_cfg = OmegaConf.to_container( cfg=cfg.exploration.exploration_config, resolve=True ) # If no `type` is given, tune's `UnifiedLogger` is used as follows: # DEFAULT_LOGGERS = (JsonLogger, CSVLogger, TBXLogger) # `UnifiedLogger(config, self._logdir, loggers=DEFAULT_LOGGERS)` # - The `logger_config` defined below is used here: # https://github.com/ray-project/ray/blob/863928c4f13b66465399d63e01df3c446b4536d9/rllib/algorithms/algorithm.py#L423 # - The `Trainable._create_logger` method can be found here: # https://github.com/ray-project/ray/blob/8d2dc9a3997482100034b60568b06aad7fd9fc59/python/ray/tune/trainable/trainable.py#L1067 debug_settings = instantiate(cfg.debugging, _convert_="partial") # Register the environment with Ray # NOTE: Assume that same environment cls is used for training and evaluation. # TODO: This blocks us from being able to have `view()` can we change this? env_module, env_cls = cfg.environment.env.rsplit(".", 1) env_cls = getattr(import_module(env_module), env_cls) register_env(cfg.environment.env, lambda config: env_cls(**config)) return env_settings, eval_settings, debug_settings, exploration_cfg def _build_algo_cfg(cfg: DictConfig) -> Tuple[Algorithm, AlgorithmConfig]: """Build the algorithm config and object for training an RLlib model. Args: cfg (DictConfig): Hydra config with all required parameters. Returns: Tuple(Algorithm, AlgorithmConfig): Training algorithm and associated config. 
""" # Instantiate everything necessary for creating the algorithm config. env_settings, eval_settings, debug_settings, explor_cfg = _instantiate_config(cfg) # Manually prepare agent_ids using same logic as within environments/rl_harness.py num_agents = env_settings["env_config"].get("num_agents", 1) interacts = env_settings["env_config"]["interactions"] # map sh2 interactions to underlying BurnStatus category interacts_map = { "fireline": BurnStatus.FIRELINE, "wetline": BurnStatus.WETLINE, "scratchline": BurnStatus.SCRATCHLINE, } agent_id_start = ( max(set([int(v) for k, v in interacts_map.items() if k in interacts])) + 1 ) agent_id_stop = agent_id_start + num_agents sim_agent_ids = np.arange(agent_id_start, agent_id_stop) # FIXME: Usage of "agent_{}" doesn't allow us to delineate agents groups. agent_ids = {f"agent_{i}" for i in sim_agent_ids} algo_cfg = ( get_trainable_cls(cfg.algo.name) .get_default_config() .training(**cfg.training) .environment(**env_settings) .framework(**cfg.framework) .rollouts(**cfg.rollouts) .evaluation(**eval_settings) .exploration(explore=cfg.exploration.explore, exploration_config=explor_cfg) .resources(**cfg.resources) .debugging(**debug_settings)
"""FIXME: A one line summary of the module or program. Leave one blank line. The rest of this docstring should contain an overall description of the module or program. Optionally, it may also contain a brief description of exported classes and functions and/or usage examples. Typical usage example: foo = ClassFoo() bar = foo.FunctionBar() """ # from simharness2.utils.evaluation_fires import get_default_operational_fires # from simharness2.callbacks.set_env_seeds_callback import SetEnvSeedsCallback os.environ["HYDRA_FULL_ERROR"] = "1" # Register custom resolvers that are used within the config files OmegaConf.register_new_resolver("operational_screen_size", lambda x: int(x * 39)) OmegaConf.register_new_resolver("calculate_half", lambda x: int(x / 2)) OmegaConf.register_new_resolver("square", lambda x: x**2) LOGGER = logging.getLogger(__name__) def _set_variable_hyperparameters(algo_cfg: AlgorithmConfig, cfg: DictConfig) -> None: """Override the algo_cfg hyperparameters we would like to tune over. Args: algo_cfg (AlgorithmConfig): Config used for training our model. cfg (DictConfig): Hydra config with all required parameters. """ tunables = OmegaConf.to_container(cfg.tunables, resolve=True) for section_key, param_dict in tunables.items(): for key, value in param_dict.items(): if value["type"] == "loguniform": sampler = tune.loguniform(value["values"][0], value["values"][1]) elif value["type"] == "uniform": sampler = tune.uniform(value["values"][0], value["values"][1]) elif value["type"] == "random": sampler = tune.randint(value["values"][0], value["values"][1]) elif value["type"] == "choice": sampler = tune.choice(value["values"]) else: LOGGER.error(f"Invalid value type {value['type']} given - skipping.") tunables[section_key][key] = sampler algo_cfg.training(**tunables["training"]) def train_with_tune(algo_cfg: AlgorithmConfig, cfg: DictConfig) -> ResultGrid: """Iterate through combinations of hyperparameters to find optimal training runs. Args: algo_cfg (AlgorithmConfig): Algorithm config for RLlib. cfg (DictConfig): Hydra config with all required parameters. Returns: ResultGrid: Set of Results objects from running Tuner.fit() """ trainable_algo_str = cfg.algo.name param_space = algo_cfg # Override the variables we want to tune on ()`param_space` is updated in-place). if cfg.tunables: _set_variable_hyperparameters(algo_cfg=param_space, cfg=cfg) # Configs for this specific trial run run_config = air.RunConfig( name=cfg.run.name or None, storage_path=cfg.run.storage_path, stop={**cfg.stop_conditions}, callbacks=[AimLoggerCallback(cfg=cfg, **cfg.aim)], failure_config=None, sync_config=tune.SyncConfig(syncer=None), # Disable syncing checkpoint_config=air.CheckpointConfig(**cfg.checkpoint), log_to_file=cfg.run.log_to_file, ) # TODO make sure 'reward' is reported with tune.report() # TODO add this to config # Config for the tuning process (used for all trial runs) # tune_config = tune.TuneConfig(num_samples=4) # Create a Tuner tuner = tune.Tuner( trainable=trainable_algo_str, param_space=param_space, run_config=run_config, # tune_config=tune_config, ) results = tuner.fit() result_df = results.get_dataframe() logging.debug(result_df) return results def train(algo: Algorithm, cfg: DictConfig) -> None: """Train the given algorithm within RLlib. Args: algo (Algorithm): Algorithm to train with. cfg (DictConfig): Hydra config with all required parameters for training. 
""" stop_cond = cfg.stop_conditions # Run training loop and print results after each iteration for i in range(stop_cond.training_iteration): LOGGER.info(f"Training iteration {i}.") result = algo.train() LOGGER.info(f"{pretty_print(result)}\n") if i % cfg.checkpoint.checkpoint_frequency == 0: ckpt_path = algo.save() log_str = f"A checkpoint has been created inside directory: {ckpt_path}.\n" LOGGER.info(log_str) if ( result["timesteps_total"] >= stop_cond.timesteps_total or result["episode_reward_mean"] >= stop_cond.episode_reward_mean ): LOGGER.warning(f"Training stopped short at iteration {i}.\n") ts = result["timesteps_total"] mean_rew = result["episode_reward_mean"] LOGGER.info(f"Timesteps: {ts}\nEpisode_Mean_Rewards: {mean_rew}\n") break model_path = algo.save() LOGGER.info(f"The final model has been saved inside directory: {model_path}.") algo.stop() def _instantiate_config( cfg: DictConfig, ) -> Tuple[Dict[str, Any], Dict[str, Any], Dict[str, Any], Dict[str, Any]]: """Instantiate the algorithm config used to build the RLlib training algorithm. Args: cfg (DictConfig): Hydra config with all required parameters. Returns: Tuple[Dict[str, Any], Dict[str, Any], Dict[str, Any], Dict[str, Any]]: env_settings: Parameters needed for instantiating the environment eval_settings: Parameters needed for running the evaluation code. debug_settings: Settings needed for debugging. exploration_cfg: RLlib exploration configurations. """ # Instantiate the env and eval settings objects from the config. # NOTE: We are instantiating to a NEW object on purpose; otherwise a # `TypeError` will be raised when attempting to log the cfg to Aim. env_settings = instantiate(cfg.environment, _convert_="partial") eval_settings = instantiate(cfg.evaluation, _convert_="partial") # FIXME: Fire scenario configuration disabled for now. Fix this in new MR. # Get the operational fires we want to run evaluation with # operational_fires = get_default_operational_fires(cfg) # Inject operational fires into the evaluation settings # eval_settings["evaluation_config"]["env_config"].update( # {"scenarios": operational_fires} # ) # Prepare exploration options for the algorithm exploration_cfg = OmegaConf.to_container( cfg=cfg.exploration.exploration_config, resolve=True ) # If no `type` is given, tune's `UnifiedLogger` is used as follows: # DEFAULT_LOGGERS = (JsonLogger, CSVLogger, TBXLogger) # `UnifiedLogger(config, self._logdir, loggers=DEFAULT_LOGGERS)` # - The `logger_config` defined below is used here: # https://github.com/ray-project/ray/blob/863928c4f13b66465399d63e01df3c446b4536d9/rllib/algorithms/algorithm.py#L423 # - The `Trainable._create_logger` method can be found here: # https://github.com/ray-project/ray/blob/8d2dc9a3997482100034b60568b06aad7fd9fc59/python/ray/tune/trainable/trainable.py#L1067 debug_settings = instantiate(cfg.debugging, _convert_="partial") # Register the environment with Ray # NOTE: Assume that same environment cls is used for training and evaluation. # TODO: This blocks us from being able to have `view()` can we change this? env_module, env_cls = cfg.environment.env.rsplit(".", 1) env_cls = getattr(import_module(env_module), env_cls) register_env(cfg.environment.env, lambda config: env_cls(**config)) return env_settings, eval_settings, debug_settings, exploration_cfg def _build_algo_cfg(cfg: DictConfig) -> Tuple[Algorithm, AlgorithmConfig]: """Build the algorithm config and object for training an RLlib model. Args: cfg (DictConfig): Hydra config with all required parameters. 
Returns: Tuple(Algorithm, AlgorithmConfig): Training algorithm and associated config. """ # Instantiate everything necessary for creating the algorithm config. env_settings, eval_settings, debug_settings, explor_cfg = _instantiate_config(cfg) # Manually prepare agent_ids using same logic as within environments/rl_harness.py num_agents = env_settings["env_config"].get("num_agents", 1) interacts = env_settings["env_config"]["interactions"] # map sh2 interactions to underlying BurnStatus category interacts_map = { "fireline": BurnStatus.FIRELINE, "wetline": BurnStatus.WETLINE, "scratchline": BurnStatus.SCRATCHLINE, } agent_id_start = ( max(set([int(v) for k, v in interacts_map.items() if k in interacts])) + 1 ) agent_id_stop = agent_id_start + num_agents sim_agent_ids = np.arange(agent_id_start, agent_id_stop) # FIXME: Usage of "agent_{}" doesn't allow us to delineate agents groups. agent_ids = {f"agent_{i}" for i in sim_agent_ids} algo_cfg = ( get_trainable_cls(cfg.algo.name) .get_default_config() .training(**cfg.training) .environment(**env_settings) .framework(**cfg.framework) .rollouts(**cfg.rollouts) .evaluation(**eval_settings) .exploration(explore=cfg.exploration.explore, exploration_config=explor_cfg) .resources(**cfg.resources) .debugging(**debug_settings)
.callbacks(RenderEnv)
0
2023-12-08 19:13:31+00:00
12k
racinette/querky
querky/querky.py
[ { "identifier": "one_", "path": "querky/result_shape.py", "snippet": "def one_(typename: str | None, *, optional: bool = True) -> typing.Callable[[Query], ResultShape]:\n def late_binding(query: Query) -> One:\n return One(query, typename, optional=optional)\n return late_binding" }, { "identifier": "all_", "path": "querky/result_shape.py", "snippet": "def all_(typename: str | None) -> typing.Callable[[Query], ResultShape]:\n def late_binding(query: Query) -> All:\n return All(query, typename)\n return late_binding" }, { "identifier": "value_", "path": "querky/result_shape.py", "snippet": "def value_(annotation: str | TypeMetaData | None = None, *, optional: bool = False) -> typing.Callable[[Query], ResultShape]:\n def late_binding(query: Query) -> Value:\n return Value(query, annotation, optional=optional)\n return late_binding" }, { "identifier": "status_", "path": "querky/result_shape.py", "snippet": "def status_() -> typing.Callable[[Query], ResultShape]:\n def late_binding(query: Query) -> Status:\n return Status(query)\n return late_binding" }, { "identifier": "column_", "path": "querky/result_shape.py", "snippet": "def column_(annotation: str | TypeMetaData | None = None, *, elem_optional: bool = False) -> typing.Callable[[Query], ResultShape]:\n def late_binding(query: Query) -> Value:\n return Column(query, annotation, elem_optional=elem_optional)\n return late_binding" }, { "identifier": "One", "path": "querky/result_shape.py", "snippet": "class One(ResultShape):\n def __init__(self, query: Query, typename: str | None, *, optional: bool = True):\n super().__init__(query)\n\n if self.query.parent_query is None:\n if self.querky.type_factory is not None:\n self.ctor = self.querky.type_factory(self.query, typename)\n else:\n self.ctor = None\n else:\n # забираем конструктор типа из базового запроса\n parent_shape = self.query.parent_query.shape\n if not isinstance(parent_shape, (All, One)):\n raise ValueError(\"Invalid shape, must be a row shape\")\n\n self.ctor = parent_shape.ctor\n # копируем название типа из отеческого запроса\n typename = parent_shape.ctor.typename\n\n if self.ctor.shape is None:\n self.ctor.shape = self\n\n self.optional = optional\n if self.ctor is not None:\n type_meta = TypeMetaData(typename)\n else:\n type_meta = self.query.contract.get_default_record_type_metadata()\n self.return_type = TypeKnowledge(\n metadata=type_meta,\n is_optional=self.optional,\n is_array=False,\n elem_is_optional=None\n )\n self.annotate()\n\n def annotate(self):\n self.query.annotation_generator.annotate(self.return_type, context='result_type')\n\n def set_attributes(self, attrs: typing.Tuple[ResultAttribute, ...]):\n for attribute in self.query.query_signature.attributes:\n try:\n if attr_hint := self.query.attr_hints.get(attribute.name, None):\n attribute.consume_attr(attr_hint)\n self.query.annotation_generator.annotate(attribute.type_knowledge, 'attribute')\n except Exception as ex:\n raise QueryInitializationError(self.query, f\"attribute `{attribute.name}`\") from ex\n if self.ctor is not None:\n if self.ctor.attributes is None:\n self.ctor.set_attributes(attrs)\n elif self.ctor.attributes != attrs:\n raise QueryInitializationError(\n self.query,\n \"Expected the same return type signature, but the attributes are not equal:\\n\"\n f\"Expected: {self.ctor.attributes}\\n\"\n f\"Got: {attrs}\"\n )\n\n def generate_type_code(self) -> typing.List[str] | None:\n if self.ctor is not None and not self.ctor.type_code_generated:\n return self.ctor.generate_type_code()\n else:\n 
return None\n\n def get_imports(self) -> set[str]:\n s = super().get_imports()\n if self.ctor is not None:\n return s.union(self.ctor.get_imports())\n return s\n\n async def fetch(self, conn, params):\n contract = self.query.module.querky.contract\n row = await contract.fetch_one(conn, self.query, params)\n if self.ctor.row_factory and row is not None:\n row = self.ctor.row_factory(row)\n return row\n\n def fetch_sync(self, conn, params):\n contract = self.query.module.querky.contract\n row = contract.fetch_one_sync(conn, self.query, params)\n if self.ctor.row_factory:\n row = self.ctor.row_factory(row)\n return row\n\n def get_exports(self) -> typing.Sequence[str]:\n if self.ctor is not None:\n return [self.ctor.get_exported_name()]\n else:\n return []" }, { "identifier": "All", "path": "querky/result_shape.py", "snippet": "class All(One):\n def __init__(self, query: Query, typename: str | None,):\n super().__init__(query, typename, optional=False)\n self.return_type.is_optional = False\n self.return_type.is_array = True\n self.return_type.elem_is_optional = False\n self.query.annotation_generator.annotate(self.return_type, context='result_type')\n\n def annotate(self):\n pass\n\n async def fetch(self, conn, params):\n contract = self.query.module.querky.contract\n rows = await contract.fetch_all(conn, self.query, params)\n if self.ctor.row_factory:\n rows = [\n self.ctor.row_factory(row)\n for row in rows\n ]\n return rows\n\n def fetch_sync(self, conn, params):\n contract = self.query.module.querky.contract\n rows = contract.fetch_all_sync(conn, self.query, params)\n if self.ctor.row_factory:\n rows = [\n self.ctor.row_factory(row)\n for row in rows\n ]\n return rows" }, { "identifier": "ResultShape", "path": "querky/result_shape.py", "snippet": "class ResultShape(ABC, GetImportsMixin):\n def __init__(self, query: Query) -> None:\n self.query: Query = query\n self.return_type: TypeKnowledge | None = None\n\n @property\n def querky(self):\n return self.query.querky\n\n def get_imports(self) -> set[str]:\n return self.return_type.get_imports()\n\n @abstractmethod\n def set_attributes(self, attrs: typing.Tuple[ResultAttribute, ...]):\n ...\n\n @abstractmethod\n def generate_type_code(self) -> typing.List[str] | None:\n ...\n\n def get_annotation(self) -> str:\n return self.return_type.typehint\n\n @abstractmethod\n async def fetch(self, conn, bound_params):\n ...\n\n @abstractmethod\n async def fetch_sync(self, conn, bound_params):\n ...\n\n @abstractmethod\n def get_exports(self) -> typing.Sequence[str]:\n ..." }, { "identifier": "ConnParamConfig", "path": "querky/conn_param_config.py", "snippet": "class ConnParamConfig:\n name: str\n\n def create_parameter(\n self,\n query: Query,\n parameters: typing.Sequence[Parameter],\n type_metadata: TypeMetaData\n ) -> tuple[Parameter, TypeKnowledge, int]:\n ..." 
}, { "identifier": "First", "path": "querky/conn_param_config.py", "snippet": "class First(ConnParamConfig):\n positional: bool = False\n\n def create_parameter(\n self,\n _query: Query,\n parameters: typing.Sequence[Parameter],\n type_metadata: TypeMetaData\n ) -> tuple[Parameter, TypeKnowledge, int]:\n if self.positional:\n kind = Parameter.POSITIONAL_ONLY\n else:\n if parameters and parameters[0].kind == Parameter.POSITIONAL_ONLY:\n kind = Parameter.POSITIONAL_ONLY\n else:\n kind = Parameter.POSITIONAL_OR_KEYWORD\n\n p = Parameter(self.name, kind)\n return p, TypeKnowledge(type_metadata, False, False, False), 0" }, { "identifier": "AnnotationGenerator", "path": "querky/annotation_generator.py", "snippet": "class AnnotationGenerator(ABC):\n @abstractmethod\n def generate(self, knowledge: TypeKnowledge, context: str) -> str:\n ...\n\n def annotate(self, knowledge: TypeKnowledge, context: str, force: bool = False) -> None:\n if knowledge.typehint is None or force:\n knowledge.typehint = self.generate(knowledge, context)" }, { "identifier": "TypeConstructor", "path": "querky/type_constructor.py", "snippet": "class TypeConstructor(typing.Generic[T], GetImportsMixin):\n def __init__(\n self,\n query: Query,\n typename: str,\n required_imports: typing.Set[str],\n row_factory: typing.Callable[[typing.Any], T] | None\n ):\n self.query = query\n self.type_code_generated = False\n self.typename = typename\n self.required_imports = required_imports\n self.shape: typing.Optional[ResultShape] = None\n self.attributes: typing.Optional[typing.Tuple[ResultAttribute, ...]] = None\n self.row_factory = row_factory\n self.type_code_generated: bool = False\n self.attributes_collected: bool = False\n\n def set_attributes(self, attrs: typing.Tuple[ResultAttribute, ...]):\n self.attributes = attrs\n\n def get_imports(self) -> set[str]:\n s = set(self.required_imports)\n for attr in self.attributes:\n s.update(attr.get_imports())\n return s\n\n def get_exported_name(self) -> str:\n return self.typename\n\n def indent(self, i: int) -> str:\n return self.shape.query.querky.get_indent(i)" }, { "identifier": "ModuleConstructor", "path": "querky/module_constructor.py", "snippet": "class ModuleConstructor:\n def __init__(\n self,\n querky: Querky,\n module: types.ModuleType,\n fullpath: str,\n module_path: str,\n filedir: str\n ):\n self.module = module\n self.querky = querky\n self.imports = set(querky.imports)\n self.exports = set()\n self.fullpath = fullpath\n self.module_path = module_path\n self.filedir = filedir\n\n self.queries_list = []\n\n def indent(self, i: int) -> str:\n return self.querky.get_indent(i)\n\n def _post_init(self):\n # Generate module code\n code = []\n for query in self.queries_list:\n query_code = query.generate_code()\n if not query_code:\n continue\n code.append('')\n code.append('')\n code.extend(query_code)\n code.append('')\n\n # Collect imports\n for query in self.queries_list:\n self.imports.update(query.get_imports())\n\n # Collect exports\n for query in self.queries_list:\n self.exports.update(query.get_exports())\n\n # Create import lines\n imports = [\n *getattr(self.module, '__imports__', []),\n *self.imports\n ]\n\n for query in self.queries_list:\n imports.append(\n f\"from {self.module.__name__} import {query.query.__name__} as {query.local_name}\"\n )\n\n # Imports + Code\n code = [\n *imports,\n *code,\n ]\n\n # If there are exports, create them at the end of the file (__all__)\n if self.exports:\n code.append('')\n code.append('__all__ = [')\n for export in self.exports:\n 
code.append(f'{self.indent(1)}\"{export}\",')\n code.append(']')\n code.append('')\n\n self.querky.sign_file_contents(code)\n\n code = '\\n'.join(code)\n\n # checking, if file already exists\n file_exists = path.isfile(self.fullpath)\n if file_exists:\n # check, if we can overwrite the contents\n self.querky.check_file_is_mine(self.fullpath)\n\n if self.querky.subdir:\n os.makedirs(self.filedir, exist_ok=True)\n\n with open(self.fullpath, encoding='utf-8', mode='w') as f:\n f.write(code)\n\n async def generate_module(self, db):\n for query in self.queries_list:\n await query.fetch_types(db)\n self._post_init()\n\n def generate_module_sync(self, db):\n for query in self.queries_list:\n query.fetch_types_sync(db)\n self._post_init()" }, { "identifier": "TypeMetaData", "path": "querky/base_types.py", "snippet": "class TypeMetaData(GetImportsMixin):\n counterpart: str\n required_imports: set[str] | None = None\n\n def get_imports(self) -> set[str]:\n if self.required_imports is None:\n return set()\n return set(self.required_imports)\n\n @classmethod\n def from_type(cls, t: typing.Type) -> TypeMetaData:\n type_name = t.__name__\n module_path = t.__module__\n return TypeMetaData(\n counterpart=type_name,\n required_imports={f\"from {module_path} import {type_name}\"}\n )" }, { "identifier": "Query", "path": "querky/query.py", "snippet": "class Query(typing.Generic[RS]):\n defaults: dict[str, typing.Any]\n\n def __init__(\n self,\n func: typing.Callable,\n shape: typing.Callable[[Query], RS],\n module: ModuleConstructor,\n conn_param_config: ConnParamConfig,\n explicit_name: typing.Optional[str],\n parent_query: typing.Optional[Query[One | All]],\n kwargs: typing.Optional[typing.Dict[str, typing.Any]]\n ) -> None:\n self.parent_query: Query[One | All] | None = parent_query\n\n self.imports = set()\n self.kwargs = kwargs or dict()\n self.query = func\n self.name = explicit_name or func.__name__\n self.conn_param_config = conn_param_config\n\n self.sig = inspect.signature(func)\n self.template_signature = None\n\n self.module = module\n self.module.queries_list.append(self)\n\n self.param_mapper: ParamMapper = self.contract.create_param_mapper(self)\n self.sql = self.param_mapper.parametrize_query()\n self.default = DictGetAttr(self.param_mapper.defaults)\n # side effect: attr gets populated, so we flush it\n self.attr_hints: dict[str, Attr] = {\n a.name: a\n for a in _attr_.__getattrs__()\n }\n\n module_filename = self.module.module.__file__\n common = path.commonprefix([module.querky.basedir, module_filename])\n self.relative_path = module_filename[len(common):]\n self.unique_name = f\"{self.relative_path}:{self.query.__name__}\"\n self.local_name = self.get_local_name()\n\n self.query_signature: QuerySignature | None = None\n self.conn_type_knowledge: TypeKnowledge | None = None\n\n self.bound_type = None\n self.shape: ResultShape = shape(self)\n\n if not isinstance(self.shape, (One, All)) and parent_query:\n raise ValueError(\"Only One and All queries can have a parent query.\")\n if parent_query and not isinstance(parent_query.shape, (One, All)):\n raise ValueError(\"Parent query must be of either One or All shape.\")\n\n logger.debug(\n \"Query: %s\\nSQL: %s\",\n self.unique_name, self.sql\n )\n\n @property\n def annotation_generator(self):\n return self.querky.annotation_generator\n\n @property\n def contract(self):\n return self.module.querky.contract\n\n @property\n def querky(self):\n return self.module.querky\n\n def bind_type(self, t) -> None:\n self.bound_type = t\n\n async def 
execute(self, conn, *args, **kwargs):\n params = self.param_mapper.map_params(*args, **kwargs)\n return await self.shape.fetch(conn, params)\n\n def execute_sync(self, conn, *args, **kwargs):\n params = self.param_mapper.map_params(*args, **kwargs)\n return self.shape.fetch_sync(conn, params)\n\n def _after_types_fetched(self):\n # типы параметров передадим мапперу\n self.param_mapper.assign_type_knowledge(self.query_signature.parameters)\n # а типы аттрибутов - результату\n self.shape.set_attributes(self.query_signature.attributes)\n\n async def fetch_types(self, db) -> None:\n try:\n self.query_signature = await self.contract.get_query_signature(db, self)\n self._after_types_fetched()\n except QueryInitializationError:\n raise\n except Exception as ex:\n raise QueryInitializationError(self, additional_hint=\"fetching types\") from ex\n\n def fetch_types_sync(self, db) -> None:\n try:\n self.query_signature = self.contract.get_query_signature_sync(db, self)\n self._after_types_fetched()\n except QueryInitializationError:\n raise\n except Exception as ex:\n raise QueryInitializationError(self, additional_hint=\"fetching types\") from ex\n\n def string_signature(self):\n return f\"{self.relative_path}: {self.query.__name__}{self.sig}\"\n\n def get_local_name(self) -> str:\n return f\"_q{self.module.queries_list.index(self)}\"\n\n def _generate_proxy_function_code(self):\n try:\n new_params = []\n\n for param in self.param_mapper.params:\n name = param.name\n\n old_param = param.param\n\n if old_param.default is not inspect._empty:\n default = ReprHelper(f\"{self.local_name}.default.{name}\")\n else:\n default = inspect._empty\n\n typehint = param.type_knowledge.typehint\n if typehint is None:\n raise QueryInitializationError(\n self,\n f\"{param.name}: parameter type annotation is missing\"\n )\n\n new_params.append(\n Parameter(\n name,\n old_param.kind,\n annotation=ReprHelper(typehint),\n default=default\n )\n )\n\n conn_param, type_knowledge, index = self.conn_param_config.create_parameter(\n self,\n new_params,\n self.contract.get_connection_type_metadata()\n )\n self.conn_type_knowledge = type_knowledge\n self.annotation_generator.annotate(type_knowledge, context='conn_param')\n if type_knowledge.typehint is not None:\n conn_param = conn_param.replace(annotation=ReprHelper(type_knowledge.typehint))\n\n new_params.insert(index, conn_param)\n\n return_annotation = self.shape.get_annotation()\n if return_annotation is None:\n raise QueryInitializationError(\n self,\n f\"return type annotation is missing\"\n )\n\n return_annotation_repr = ReprHelper(return_annotation)\n\n self.new_signature = self.sig.replace(\n parameters=new_params,\n return_annotation=return_annotation_repr\n )\n\n is_async = self.contract.is_async()\n async_ = 'async ' if is_async else ''\n await_ = 'await ' if is_async else ''\n _sync = \"_sync\" if not is_async else ''\n\n conn_str = self.conn_param_config.name\n\n arg_remap_string = self.param_mapper.mirror_arguments()\n arg_string = f\"{conn_str}, {arg_remap_string}\"\n\n try:\n code = [\n f\"{async_}def {self.name}{self.new_signature}:\",\n f\"{self.querky.get_indent(1)}return {await_}{self.local_name}.execute{_sync}({arg_string})\"\n ]\n except Exception as _ex:\n # for debugging\n raise\n\n logger.debug('[OK] - %s', self.unique_name)\n return code\n except Exception as ex:\n logger.exception('[BAD] - %s', self.unique_name)\n raise ex\n\n def get_type_bind_ident(self) -> typing.Optional[str]:\n if isinstance(self.shape, (Value, Column, Status)):\n return None\n 
elif isinstance(self.shape, (One, All)):\n if self.shape.ctor:\n return self.shape.ctor.typename\n return None\n\n def get_exports(self):\n exports = {\n self.name,\n *self.shape.get_exports()\n }\n if parent := self.parent_query:\n parent_shape = parent.shape\n if not isinstance(parent_shape, (One, All)):\n raise ValueError(\"parent shape must be ether One or All\")\n shape: typing.Union[One, All] = parent_shape\n exports.add(shape.ctor.typename)\n return exports\n\n def get_imports(self):\n imports = set(self.imports)\n for elem in self.param_mapper.params:\n imports.update(elem.get_imports())\n\n if self.conn_type_knowledge is not None:\n imports.update(self.conn_type_knowledge.get_imports())\n\n if (parent := self.parent_query) and parent.module is not self.module:\n parent_shape = parent.shape\n if isinstance(parent_shape, (One, All)):\n imports.add(\n f\"from {parent.module.module_path} import {parent_shape.ctor.typename}\"\n )\n else:\n raise ValueError(\"you can only use return types from 'one' and 'many' queries\")\n else:\n # we're gonna create the type from scratch, so we need the imports\n imports.update(self.shape.get_imports())\n\n return imports\n\n def generate_code(self):\n lines = []\n # data type code\n if type_code := self.shape.generate_type_code():\n if cb := self.module.querky.on_before_type_code_emit:\n type_code = cb(type_code, self)\n lines.extend(type_code)\n lines.append('')\n lines.append('')\n\n # proxy function code, which simply accepts annotated arguments and proxies the call to this query\n func_code = self._generate_proxy_function_code()\n if cb := self.module.querky.on_before_func_code_emit:\n func_code = cb(func_code, self)\n lines.extend(func_code)\n\n if bound_type_ident := self.get_type_bind_ident():\n # binding return type to the underlying query\n lines.append('')\n lines.append(f'{self.local_name}.bind_type({bound_type_ident})')\n\n return lines\n\n def __call__(self, conn, *args, **kwargs):\n if self.contract.is_async():\n return self.execute(conn, *args, **kwargs)\n else:\n return self.execute_sync(conn, *args, **kwargs)" }, { "identifier": "Contract", "path": "querky/contract.py", "snippet": "class Contract(ABC):\n @abstractmethod\n def create_param_mapper(self, query: Query) -> ParamMapper:\n ...\n\n @abstractmethod\n def get_default_record_type_metadata(self) -> TypeMetaData:\n ...\n\n @abstractmethod\n def get_connection_type_metadata(self) -> TypeMetaData:\n ...\n\n @abstractmethod\n async def get_query_signature(self, db, query: Query) -> QuerySignature:\n ...\n\n @abstractmethod\n def get_query_signature_sync(self, db, query: Query) -> QuerySignature:\n ...\n\n @abstractmethod\n def is_async(self) -> bool:\n ...\n\n @abstractmethod\n async def fetch_value(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n async def fetch_one(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n async def fetch_all(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n async def fetch_column(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n async def fetch_status(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n async def raw_execute(self, conn, sql: str, params):\n ...\n\n @abstractmethod\n async def raw_fetchval(self, conn, sql: str, params):\n ...\n\n @abstractmethod\n async def raw_fetchone(self, conn, sql: str, params):\n ...\n\n @abstractmethod\n async def raw_fetch(self, conn, sql: str, params):\n ...\n\n @abstractmethod\n def fetch_value_sync(self, conn, query: 
Query, bound_params):\n ...\n\n @abstractmethod\n def fetch_one_sync(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n def fetch_all_sync(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n def fetch_column_sync(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n def fetch_status_sync(self, conn, query: Query, bound_params):\n ...\n\n @abstractmethod\n async def raw_execute_sync(self, conn, sql: str, params):\n ...\n\n @abstractmethod\n async def raw_fetchval_sync(self, conn, sql: str, params):\n ...\n\n @abstractmethod\n def raw_fetchone_sync(self, conn, sql: str, params):\n ...\n\n @abstractmethod\n def raw_fetch_sync(self, conn, sql: str, params):\n ..." } ]
import importlib
import types
import typing
import inspect
import os
import logging
from types import ModuleType
from os import path
from querky.result_shape import one_, all_, value_, status_, column_, One, All, ResultShape
from querky.conn_param_config import ConnParamConfig, First
from querky.annotation_generator import AnnotationGenerator
from querky.type_constructor import TypeConstructor
from querky.module_constructor import ModuleConstructor
from querky.base_types import TypeMetaData
from querky.query import Query
from querky.contract import Contract
7,218
conn_param_config: ConnParamConfig | None = None, type_factory: typing.Callable[[Query, str], TypeConstructor] | None = None, subdir: str | None = "queries", on_before_func_code_emit: typing.Optional[typing.Callable[[typing.List[str], Query], typing.List[str]]] = None, on_before_type_code_emit: typing.Optional[typing.Callable[[typing.List[str], Query], typing.List[str]]] = None, imports: typing.Optional[typing.Set[str]] = None, indent: str = ' ', query_class: typing.Type[Query] = Query ): self.basedir = basedir self.on_before_func_code_emit = on_before_func_code_emit self.on_before_type_code_emit = on_before_type_code_emit self.query_class = query_class self.imports = imports or set() self.indent = indent self.annotation_generator = annotation_generator self.module_ctors: dict[types.ModuleType, ModuleConstructor] = dict() self.type_factory = type_factory if conn_param_config is None: conn_param_config = First(name='__conn', positional=True) self.conn_param_config = conn_param_config self.contract = contract self.subdir = subdir if self.subdir and not str.isidentifier(self.subdir): raise ValueError("subdir must be a valid python identifier") self.file_signature = "# ~ AUTOGENERATED BY QUERKY ~ #" def get_indent(self, i: int): return self.indent * i def create_query( self, fn: typing.Callable[[...], str], shape: typing.Callable[[Query], ResultShape], conn_param_config: ConnParamConfig | None, explicit_name: str | None, parent_query: typing.Optional[Query], kwargs: typing.Optional[typing.Dict[str, typing.Any]] ) -> Query: module = inspect.getmodule(fn) if module in self.module_ctors: module_ctor = self.module_ctors[module] else: filename = self.generate_filename(module) if not str.isidentifier(filename): raise ValueError(f"Generated a filename which is not a valid python identifier: {filename}") filedir = path.dirname(module.__file__) new_module_name = module.__name__.rsplit('.', maxsplit=1)[0] if self.subdir: filedir = path.join(filedir, self.subdir) new_module_name = f"{new_module_name}.{self.subdir}" fullpath = path.join(filedir, f'{filename}.py') new_module_name = f"{new_module_name}.{filename}" module_ctor = ModuleConstructor(self, module, fullpath, new_module_name, filedir) self.module_ctors[module] = module_ctor return self.query_class( fn, shape, module_ctor, self.conn_param_config or conn_param_config, explicit_name, parent_query, kwargs ) def query( self, arg: str | TypeMetaData | Query | typing.Callable[[...], str] | None = None, *, shape: ShapeStringRepr = 'status', optional: bool | None = None, **kwargs ) -> QueryDef | Query: def wrapper(fn: typing.Callable[[...], str]) -> Query: nonlocal optional if shape in ['many', 'one']: if isinstance(arg, TypeMetaData): raise ValueError( "TypeMetaData is not supported for `many` or `one` constructors. " "Use it only for `one` and `column` constructors." ) if not isinstance(arg, Query): if arg is None: # if we don't have a name provided for us, we're gonna create it out of the function name type_name = to_camel_case(fn.__name__) else: type_name = arg if not type_name.isidentifier(): raise ValueError(f"Name type should be a valid python identifier. You provided: {type_name}") else: type_name = None type_name: str | None if shape == 'many': if optional is not None: raise TypeError( 'ALL constructor does not accept `optional` flag -- ' 'at least an empty set will always be returned' ) created_shape = all_(type_name) else: if optional is None: optional = True
from __future__ import annotations logger = logging.getLogger("querky") def to_camel_case(snake_str): return "".join(x.capitalize() for x in snake_str.lower().split("_")) ShapeStringRepr = typing.Literal["one", "many", "column", "value", "status"] QueryDef = typing.Callable[[typing.Callable[[...], str]], Query] class Querky: def __init__( self, basedir: str | None = None, annotation_generator: AnnotationGenerator | None = None, contract: Contract | None = None, conn_param_config: ConnParamConfig | None = None, type_factory: typing.Callable[[Query, str], TypeConstructor] | None = None, subdir: str | None = "queries", on_before_func_code_emit: typing.Optional[typing.Callable[[typing.List[str], Query], typing.List[str]]] = None, on_before_type_code_emit: typing.Optional[typing.Callable[[typing.List[str], Query], typing.List[str]]] = None, imports: typing.Optional[typing.Set[str]] = None, indent: str = ' ', query_class: typing.Type[Query] = Query ): self.basedir = basedir self.on_before_func_code_emit = on_before_func_code_emit self.on_before_type_code_emit = on_before_type_code_emit self.query_class = query_class self.imports = imports or set() self.indent = indent self.annotation_generator = annotation_generator self.module_ctors: dict[types.ModuleType, ModuleConstructor] = dict() self.type_factory = type_factory if conn_param_config is None: conn_param_config = First(name='__conn', positional=True) self.conn_param_config = conn_param_config self.contract = contract self.subdir = subdir if self.subdir and not str.isidentifier(self.subdir): raise ValueError("subdir must be a valid python identifier") self.file_signature = "# ~ AUTOGENERATED BY QUERKY ~ #" def get_indent(self, i: int): return self.indent * i def create_query( self, fn: typing.Callable[[...], str], shape: typing.Callable[[Query], ResultShape], conn_param_config: ConnParamConfig | None, explicit_name: str | None, parent_query: typing.Optional[Query], kwargs: typing.Optional[typing.Dict[str, typing.Any]] ) -> Query: module = inspect.getmodule(fn) if module in self.module_ctors: module_ctor = self.module_ctors[module] else: filename = self.generate_filename(module) if not str.isidentifier(filename): raise ValueError(f"Generated a filename which is not a valid python identifier: {filename}") filedir = path.dirname(module.__file__) new_module_name = module.__name__.rsplit('.', maxsplit=1)[0] if self.subdir: filedir = path.join(filedir, self.subdir) new_module_name = f"{new_module_name}.{self.subdir}" fullpath = path.join(filedir, f'{filename}.py') new_module_name = f"{new_module_name}.{filename}" module_ctor = ModuleConstructor(self, module, fullpath, new_module_name, filedir) self.module_ctors[module] = module_ctor return self.query_class( fn, shape, module_ctor, self.conn_param_config or conn_param_config, explicit_name, parent_query, kwargs ) def query( self, arg: str | TypeMetaData | Query | typing.Callable[[...], str] | None = None, *, shape: ShapeStringRepr = 'status', optional: bool | None = None, **kwargs ) -> QueryDef | Query: def wrapper(fn: typing.Callable[[...], str]) -> Query: nonlocal optional if shape in ['many', 'one']: if isinstance(arg, TypeMetaData): raise ValueError( "TypeMetaData is not supported for `many` or `one` constructors. " "Use it only for `one` and `column` constructors." 
) if not isinstance(arg, Query): if arg is None: # if we don't have a name provided for us, we're gonna create it out of the function name type_name = to_camel_case(fn.__name__) else: type_name = arg if not type_name.isidentifier(): raise ValueError(f"Name type should be a valid python identifier. You provided: {type_name}") else: type_name = None type_name: str | None if shape == 'many': if optional is not None: raise TypeError( 'ALL constructor does not accept `optional` flag -- ' 'at least an empty set will always be returned' ) created_shape = all_(type_name) else: if optional is None: optional = True
created_shape = one_(type_name, optional=optional)
0
2023-12-13 15:16:34+00:00
12k
javrtg/C2P
tests/test_constraints.py
[ { "identifier": "constraints", "path": "nonmin_pose/constraints/constraints.py", "snippet": "def assert_smaller_idxes(param1i, param2i):\n def __init__(self, name: str, block: int, block_ids: List[int]):\n def __init__(\n self,\n params: dict,\n idx_first_el: int,\n idx_first_eq: int = 0,\n drop_eqs: Optional[List[int]] = None,\n ):\n def flatten_eqs_info(self, idx_first_eq, blocks, rows, cols, drop_eqs):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def compute_coeffs(self, coeffs: np.ndarray, f0: np.ndarray, f1: np.ndarray):\n def get_eqs_info(self, params):\n def compute_coeffs(self, coeffs: np.ndarray, f0: np.ndarray, f1: np.ndarray):\n def aggregate_data(f0, f1):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def compute_coeffs(self, coeffs: np.ndarray, f0: np.ndarray, f1: np.ndarray):\n def get_eqs_info(self, params):\n def compute_coeffs(self, coeffs: np.ndarray, f0: np.ndarray, f1: np.ndarray):\n def aggregate_data(f0, f1):\n def get_eqs_info(self, params):\n def compute_coeffs(self, coeffs: np.ndarray, f0: np.ndarray, f1: np.ndarray):\n def aggregate_data_1st_ineq(f0: np.ndarray, f1: np.ndarray):\n def aggregate_data_2nd_ineq(f0: np.ndarray, f1: np.ndarray):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\nclass Parameter:\nclass Constraint(ABC):\nclass Adjoint(Constraint):\nclass NormT(Constraint):\nclass NormQ(Constraint):\nclass NormE(Constraint):\nclass Homogenization(Constraint):\nclass CheiralityTranslationV2(Constraint):\nclass CheiralityRotation(Constraint):\nclass ManifDefLeft(Constraint):\nclass ManifDefRight(Constraint):\nclass EDefLeft(Constraint):\nclass EDefRight(Constraint):\nclass EDefLeftRight(Constraint):\nclass RightNullSpace(Constraint):\nclass LeftNullSpace(Constraint):\nclass CheiralityTranslation(Constraint):\nclass CheiralityRotationQ(Constraint):\nclass CheiralityMidpoint(Constraint):\nclass Orthogonality(Constraint):\nclass DeterminantR(Constraint):\nclass TQDefinition(Constraint):\nclass SkewTQDefinition(Constraint):\nclass ConvexHullSO3(Constraint):\n CONSTRAINT_IDX_PER_EQ: List[List[int]]\n COEFFS_PER_EQ: List[List[float]]\n CONSTRAINT_VALUES: List[float]\n EQUATION = \"adj(E) = qt^T\"\n COEFFS_PER_EQ = [\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 9\n EQUATION = \"||t||^2 = 1\"\n COEFFS_PER_EQ = [[1.0, 1.0, 1.0]]\n CONSTRAINT_VALUES = [1.0]\n EQUATION = \"||q||^2 = 1\"\n COEFFS_PER_EQ = [[1.0, 1.0, 1.0]]\n CONSTRAINT_VALUES = [1.0]\n EQUATION = \"norm(E) = 2\"\n COEFFS_PER_EQ = [[1.0] * 9]\n CONSTRAINT_VALUES = [2.0]\n E = params[\"E\"]\n EQUATION = \"h^2 = 1\"\n COEFFS_PER_EQ = [[1.0]]\n CONSTRAINT_VALUES = [1.0]\n EQUATION = \"f0^T t01 - q^T f1 - sct^2 = 0\"\n COEFFS_PER_EQ = [[1.0] * 6 + [-1.0]]\n CONSTRAINT_VALUES = [0.0]\n CONSTRAINT_VALUES = [0.0]\n EQUATION = \"f1^T E01^T [t01] f0 - scr^2 =0\"\n COEFFS_PER_EQ = [[1.0] * 18 + [-1.0]]\n 
CONSTRAINT_VALUES = [0.0]\n EQUATION = \"E E^T = [t][t]^T\"\n COEFFS_PER_EQ = [\n [1.0, 1.0, 1.0, -1.0, -1.0],\n [1.0, 1.0, 1.0, -1.0, -1.0],\n [1.0, 1.0, 1.0, -1.0, -1.0],\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 6\n EQUATION = \"E^T E = [q][q]^T\"\n COEFFS_PER_EQ = [\n [1.0, 1.0, 1.0, -1.0, -1.0],\n [1.0, 1.0, 1.0, -1.0, -1.0],\n [1.0, 1.0, 1.0, -1.0, -1.0],\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 6\n EQUATION = \"hE = [t]R\"\n COEFFS_PER_EQ = [\n [1.0, 1.0, -1.0],\n [1.0, 1.0, -1.0],\n [1.0, 1.0, -1.0],\n [1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0],\n [1.0, 1.0, -1.0],\n [1.0, 1.0, -1.0],\n [1.0, 1.0, -1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 9\n EQUATION = \"hE = R[q]\"\n COEFFS_PER_EQ = [\n [1.0, -1.0, 1.0],\n [1.0, 1.0, -1.0],\n [1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0],\n [1.0, 1.0, -1.0],\n [1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0],\n [1.0, 1.0, -1.0],\n [1.0, -1.0, 1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 9\n EQUATION = \"[t]R = R[q] = 0\"\n COEFFS_PER_EQ = [\n [1.0, -1.0, 1.0, -1.0],\n [1.0, -1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0, -1.0],\n [-1.0, 1.0, 1.0, -1.0],\n [-1.0, 1.0, -1.0, 1.0],\n [-1.0, 1.0, 1.0, -1.0],\n [1.0, -1.0, 1.0, -1.0],\n [1.0, -1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0, -1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 9\n EQUATION = \"E q = 0\"\n COEFFS_PER_EQ = [\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 3\n EQUATION = \"E^T t = 0\"\n COEFFS_PER_EQ = [\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 3\n EQUATION = \"f0^T R01 q - t01^T R01 f1 - s1^2 = 0\"\n COEFFS_PER_EQ = [[1.0] * 18 + [-1.0]]\n CONSTRAINT_VALUES = [0.0]\n EQUATION = \"f0^T E01 [q] f1 + scr^2 = 0\"\n COEFFS_PER_EQ = [[1.0] * 19]\n CONSTRAINT_VALUES = [0.0]\n EQUATION = \"f0^T R f1 - t^T R f1 - scm1^2 = 0, f0^T R q - f1^T q - scm2^2 = 0\"\n COEFFS_PER_EQ = [[1.0] * 27 + [-1.0], [1.0] * 27 + [-1.0]]\n CONSTRAINT_VALUES = [0.0, 0.0]\n EQUATION = \"R R.T = I, R.T R = I\"\n COEFFS_PER_EQ = [[1.0, 1.0, 1.0]] * 11\n CONSTRAINT_VALUES = [1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0]\n R = params[\"R\"]\n EQUATION = \"hR = cofactor(R)\"\n COEFFS_PER_EQ = [[1.0, -1.0, 1.0]] * 9\n CONSTRAINT_VALUES = [0.0] * 9\n EQUATION = \"ht - Rq = 0; hq - R^Tt = 0\"\n COEFFS_PER_EQ = [[1.0, -1.0, -1.0, -1.0]] * 6\n CONSTRAINT_VALUES = [0.0] * 6\n EQUATION = \"h[t] - ER^T, h[q] - R^T E\"\n COEFFS_PER_EQ = [\n [-1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 18\n EQUATION = \"conv SO(3)\"\n COEFFS_PER_EQ = [\n [1.0, -1.0, -1.0, -1.0],\n [1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0, 1.0],\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n [1.0, 1.0, -1.0, 1.0],\n [1.0, -1.0, -1.0],\n [1.0, 1.0, 1.0, -1.0],\n ]\n CONSTRAINT_VALUES = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0]" }, { "identifier": "Parameter", "path": "nonmin_pose/constraints/constraints.py", "snippet": "class Parameter:\n \"\"\"Class for defining a parameter.\n\n Attributes:\n name: e.g. 
E, R, t, etc. This MUST match the name being used on the constraints.\n block: 1-based index of the block.\n block_ids: 1-based index of each parameter element in the block.\n \"\"\"\n\n __slots__ = (\"name\", \"block\", \"block_ids\")\n\n def __init__(self, name: str, block: int, block_ids: List[int]):\n assert block > 0, \"block must be positive\"\n assert all(idx > 0 for idx in block_ids), \"block_id must be positive\"\n\n self.name = name\n self.block = block\n self.block_ids = block_ids" }, { "identifier": "SyntheticData", "path": "tests/testing_utils.py", "snippet": "class SyntheticData:\n \"\"\"Data generation based on [1, Sec. 7.2.1] and [2].\n\n [1] An Efficient Solution to Non-Minimal Case Essential Matrix Estimation, J.Zhao.\n [2] https://github.com/jizhaox/npt-pose/blob/master/src/create2D2DExperiment.cpp\n \"\"\"\n\n def __init__(self, seed=0, min_depth=4.0, max_depth=8.0, focal=800.0) -> None:\n self.rng = np.random.default_rng(seed)\n self.min_depth = min_depth\n self.max_depth = max_depth\n self.focal = focal # pixels\n\n def generate_data(\n self,\n transl_magnitude=2.0,\n euler_ang_magnitude=0.5,\n max_npoints=200,\n noise_level=0.0,\n scale_t=None,\n Rw1=None,\n tw1=None,\n ):\n \"\"\"Generate synthetic data.\"\"\"\n # absolute camera poses (w.r.t. world \"w\" reference).\n Rw0, tw0 = self.cam0_absolute_pose\n Rw1, tw1 = self.set_cam1_absolute_pose(\n transl_magnitude, euler_ang_magnitude, scale_t, Rw1, tw1\n )\n # relative pose such that p0 = R01 * p1 + t01.\n R01, t01, t01_unit = self.compute_relative_pose(Rw0, tw0, Rw1, tw1, scale_t)\n E01 = skew(t01_unit) @ R01\n f0_noisy, f1_noisy = self.generate_bearings(\n Rw0, tw0, Rw1, tw1, max_npoints, noise_level\n )\n return {\n \"f0\": f0_noisy,\n \"f1\": f1_noisy,\n \"R01\": R01,\n \"t01\": t01,\n \"t01_unit\": t01_unit,\n \"E01\": E01,\n }\n\n def generate_bearings(self, Rw0, tw0, Rw1, tw1, max_npoints, noise_level):\n # generate 3D points sampling from a unit cube.\n pw = self.generate_absolute_3d_points(max_npoints)\n\n # transform points to each camera reference.\n p0 = Rw0.T @ pw - Rw0.T @ tw0\n p1 = Rw1.T @ pw - Rw1.T @ tw1\n\n # corresponding bearing vectors.\n f0 = p0 / np.linalg.norm(p0, axis=0)\n f1 = p1 / np.linalg.norm(p1, axis=0)\n\n # add noise to the bearing vectors.\n f0_noisy = self.add_noise_to_bearings(f0, max_npoints, noise_level)\n f1_noisy = self.add_noise_to_bearings(f1, max_npoints, noise_level)\n return f0_noisy, f1_noisy\n\n def generate_absolute_3d_points(self, max_npoints):\n \"\"\"Sample 3D points sampling from a unit cube.\"\"\"\n unit_cube = self.rng.uniform(-0.5, 0.5, (3, max_npoints))\n directions = unit_cube / np.linalg.norm(unit_cube, axis=0)\n magnitudes = self.rng.uniform(self.min_depth, self.max_depth, (1, max_npoints))\n pw = magnitudes * directions\n return pw\n\n def add_noise_to_bearings(self, f, n, noise_level):\n \"\"\"Add noise to each bearing vector assuming spherical cameras.\n\n The noise, in pixels, is added in the tangent plane of each bearing. 
The\n distance of each tangent plane is determined by the focal length of the camera.\n \"\"\"\n cols_idx = np.arange(n)\n\n max_args, min_args = np.abs(f).argmax(0), np.abs(f).argmin(0)\n max_vals, min_vals = f[max_args, cols_idx], f[min_args, cols_idx]\n\n # first perpendicular vector.\n ortho_a = np.zeros((3, n))\n ortho_a[min_args, cols_idx] = 1.0\n ortho_a[max_args, cols_idx] = -min_vals / max_vals\n ortho_a = ortho_a / np.linalg.norm(ortho_a, axis=0)\n\n # second perpendicular vector.\n ortho_b = np.cross(f, ortho_a, axis=0)\n\n # add gaussian noise to each bearing.\n noise = self.rng.normal(0, noise_level, (2, n))\n f_noisy = self.focal * f + noise[0] * ortho_a + noise[1] * ortho_b\n f_noisy = f_noisy / np.linalg.norm(f_noisy, axis=0)\n return f_noisy\n\n def set_cam1_absolute_pose(\n self, transl_magnitude, euler_ang_magnitude, scale_t, Rw1, tw1\n ):\n \"\"\"camera 1 pose (w.r.t. world \"w\" reference).\"\"\"\n if Rw1 is None:\n euler_angles = self.rng.uniform(\n -euler_ang_magnitude, euler_ang_magnitude, (3,)\n )\n Rw1 = R.from_euler(\"zyx\", euler_angles).as_matrix()\n\n if tw1 is None:\n tw1 = transl_magnitude * self.rng.uniform(-1, 1, (3, 1))\n\n if scale_t is not None:\n # set translation magnitude, useful e.g. for accuracy vs translation length.\n tw1 = tw1 / np.linalg.norm(tw1) * scale_t\n return Rw1, tw1\n\n def compute_relative_pose(self, Rw0, tw0, Rw1, tw1, scale_t):\n \"\"\"Compute relative pose such that p0 = R01 * p1 + t01.\"\"\"\n R01 = Rw0.T @ Rw1\n t01 = Rw0.T @ (tw1 - tw0)\n if scale_t is None or scale_t > 0:\n t01_unit = t01 / np.linalg.norm(t01)\n else:\n # when there is pure rotation, any unit translation would satisfy the\n # epipolar constraint, e.g. we set it here to the x-axis unit vector.\n t01_unit = np.array([[1.0], [0], [0]])\n return R01, t01, t01_unit\n\n @property\n def cam0_absolute_pose(self):\n \"\"\"Camera 0 pose (w.r.t. 
world \"w\" reference).\"\"\"\n return np.eye(3), np.zeros((3, 1))" }, { "identifier": "adjoint_of_3x3_mat", "path": "tests/testing_utils.py", "snippet": "def adjoint_of_3x3_mat(E):\n \"\"\"Adjoint of a 3x3 matrix (valid for an essential matrix).\"\"\"\n assert E.shape == (3, 3)\n det = np.linalg.det\n det_minor00 = det(E[1:, 1:])\n det_minor01 = -det(E[1:, ::2])\n det_minor02 = det(E[1:, :2])\n det_minor10 = -det(E[::2, 1:])\n det_minor11 = det(E[::2, ::2])\n det_minor12 = -det(E[::2, :2])\n det_minor20 = det(E[:2, 1:])\n det_minor21 = -det(E[:2, ::2])\n det_minor22 = det(E[:2, :2])\n\n # adjugate/adjoint is the *transpose* of the matrix of cofactors.\n adj = np.array(\n [\n [det_minor00, det_minor10, det_minor20],\n [det_minor01, det_minor11, det_minor21],\n [det_minor02, det_minor12, det_minor22],\n ]\n )\n return adj" }, { "identifier": "sdpa2mat", "path": "tests/testing_utils.py", "snippet": "def sdpa2mat(constraint, block_sizes=[29], ndim=29):\n \"\"\"Converts SDPA format to matrix form.\"\"\"\n con_idx, blocks, values, rows, cols, coeffs = (\n constraint.constraint_idx,\n constraint.blocks,\n constraint.values,\n constraint.rows,\n constraint.cols,\n constraint.coeffs,\n )\n n_constraints = len(values)\n assert (np.unique(con_idx) == np.arange(1, n_constraints + 1)).all()\n assert (np.unique(blocks) == np.arange(1, len(block_sizes) + 1)).all()\n assert ndim == sum(block_sizes)\n\n # initialize and fill matrices of constraints.\n As = np.zeros((n_constraints, ndim, ndim))\n for block, constraint, row, col, coef in zip(blocks, con_idx, rows, cols, coeffs):\n # 0-based indexing.\n block, constraint, row, col = block - 1, constraint - 1, row - 1, col - 1\n rc_offset = sum(block_sizes[:block])\n row += rc_offset\n col += rc_offset\n As[constraint, row, col] = coef\n return As" }, { "identifier": "skew", "path": "tests/testing_utils.py", "snippet": "def skew(v):\n out = np.zeros((3, 3))\n out[0, 1] = -v[2, 0]\n out[0, 2] = v[1, 0]\n out[1, 0] = v[2, 0]\n out[1, 2] = -v[0, 0]\n out[2, 0] = -v[1, 0]\n out[2, 1] = v[0, 0]\n return out" }, { "identifier": "so3_orbitope", "path": "tests/testing_utils.py", "snippet": "def so3_orbitope(R):\n \"\"\"\n [1 + r00 + r11 + r22, r21 - r12, r02 - r20, r10 - r01 ]\n [r21 - r12, 1 + r00 - r11 - r22, r10 + r01, r02 + r20 ]\n [r02 - r20, r10 + r01, 1 - r00 + r11 - r22, r21 + r12 ]\n [r10 - r01, r02 + r20, r21 + r12, 1 - r00 - r11 + r22]\n \"\"\"\n r00, r01, r02, r10, r11, r12, r20, r21, r22 = R.ravel()\n return np.array(\n [\n [1 + r00 + r11 + r22, r21 - r12, r02 - r20, r10 - r01],\n [r21 - r12, 1 + r00 - r11 - r22, r10 + r01, r02 + r20],\n [r02 - r20, r10 + r01, 1 - r00 + r11 - r22, r21 + r12],\n [r10 - r01, r02 + r20, r21 + r12, 1 - r00 - r11 + r22],\n ]\n )" } ]
import numpy as np
from nonmin_pose.constraints import constraints
from nonmin_pose.constraints.constraints import Parameter
from tests.testing_utils import (
    SyntheticData,
    adjoint_of_3x3_mat,
    sdpa2mat,
    skew,
    so3_orbitope,
)
7,353
CFG_DATASET = { "seed": 0, "min_depth": 4.0, "max_depth": 8.0, "focal": 800.0, } CFG_DATA = { "transl_magnitude": 1.0, "euler_ang_magnitude": 0.5, "max_npoints": 100, "noise_level": 0.0, } def create_parameters(): params = [ Parameter("E", 1, list(range(1, 10))), Parameter("t", 1, list(range(10, 13))), Parameter("q", 1, list(range(13, 16))), Parameter("h", 1, [16]), Parameter("R", 1, list(range(17, 26))), Parameter("sct", 1, [26]), Parameter("scr", 1, [27]), Parameter("scr2", 1, [28]), Parameter("scm1", 1, [29]), Parameter("scm2", 1, [30]), Parameter("Zc", 1, list(range(31, 47))), ] return {p.name: p for p in params} def sample_data(): dataset = SyntheticData(**CFG_DATASET) data = dataset.generate_data(**CFG_DATA) h, sct, scr, scr2, scm1, scm2 = 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 q = data["R01"].T @ data["t01_unit"] x = np.concatenate( ( data["E01"].ravel(), data["t01_unit"].ravel(), q.ravel(), [h], data["R01"].ravel(), [sct, scr, scr2, scm1, scm2], so3_orbitope(data["R01"]).ravel(), ) ) return x[:, None], data def gather_errors(x, A, constraint, constraint_num, is_inequality): values = constraint.values if is_inequality: cond_sdpa_sdpa = np.allclose(values, np.zeros_like(values)) cond_data_sdpa = np.allclose((x.T @ A @ x).squeeze(), constraint_num) else: cond_sdpa_sdpa = np.allclose((x.T @ A @ x).squeeze(), values) cond_data_sdpa = np.allclose(constraint_num, values) errors = [] if not cond_sdpa_sdpa: if is_inequality: errors.append("SDPA coefficients are not zero.") else: errors.append("SDPA coefficients lead to different SDPA values.") if not cond_data_sdpa: errors.append( "SDPA values are different than those derived from data." f"\n{(x.T @ A @ x).squeeze()}\n{constraint_num}" ) success = len(errors) == 0 err_msg = "Errors:\n{}".format("\n".join(errors)) return success, err_msg def obtain_errors(constraint_class, x, constraint_num, f0=None, f1=None): params = create_parameters() constraint = constraint_class(params, 0, 0, None) is_inequality = constraint.__class__.__name__.startswith("Cheirality") if is_inequality: constraint.compute_coeffs(constraint.coeffs, f0, f1) A = sdpa2mat(constraint, block_sizes=[len(x)], ndim=len(x)) errors = gather_errors(x, A, constraint, constraint_num, is_inequality) return errors def test_manif_def_left(): x, data = sample_data() E01, t01_unit = data["E01"], data["t01_unit"]
CFG_DATASET = { "seed": 0, "min_depth": 4.0, "max_depth": 8.0, "focal": 800.0, } CFG_DATA = { "transl_magnitude": 1.0, "euler_ang_magnitude": 0.5, "max_npoints": 100, "noise_level": 0.0, } def create_parameters(): params = [ Parameter("E", 1, list(range(1, 10))), Parameter("t", 1, list(range(10, 13))), Parameter("q", 1, list(range(13, 16))), Parameter("h", 1, [16]), Parameter("R", 1, list(range(17, 26))), Parameter("sct", 1, [26]), Parameter("scr", 1, [27]), Parameter("scr2", 1, [28]), Parameter("scm1", 1, [29]), Parameter("scm2", 1, [30]), Parameter("Zc", 1, list(range(31, 47))), ] return {p.name: p for p in params} def sample_data(): dataset = SyntheticData(**CFG_DATASET) data = dataset.generate_data(**CFG_DATA) h, sct, scr, scr2, scm1, scm2 = 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 q = data["R01"].T @ data["t01_unit"] x = np.concatenate( ( data["E01"].ravel(), data["t01_unit"].ravel(), q.ravel(), [h], data["R01"].ravel(), [sct, scr, scr2, scm1, scm2], so3_orbitope(data["R01"]).ravel(), ) ) return x[:, None], data def gather_errors(x, A, constraint, constraint_num, is_inequality): values = constraint.values if is_inequality: cond_sdpa_sdpa = np.allclose(values, np.zeros_like(values)) cond_data_sdpa = np.allclose((x.T @ A @ x).squeeze(), constraint_num) else: cond_sdpa_sdpa = np.allclose((x.T @ A @ x).squeeze(), values) cond_data_sdpa = np.allclose(constraint_num, values) errors = [] if not cond_sdpa_sdpa: if is_inequality: errors.append("SDPA coefficients are not zero.") else: errors.append("SDPA coefficients lead to different SDPA values.") if not cond_data_sdpa: errors.append( "SDPA values are different than those derived from data." f"\n{(x.T @ A @ x).squeeze()}\n{constraint_num}" ) success = len(errors) == 0 err_msg = "Errors:\n{}".format("\n".join(errors)) return success, err_msg def obtain_errors(constraint_class, x, constraint_num, f0=None, f1=None): params = create_parameters() constraint = constraint_class(params, 0, 0, None) is_inequality = constraint.__class__.__name__.startswith("Cheirality") if is_inequality: constraint.compute_coeffs(constraint.coeffs, f0, f1) A = sdpa2mat(constraint, block_sizes=[len(x)], ndim=len(x)) errors = gather_errors(x, A, constraint, constraint_num, is_inequality) return errors def test_manif_def_left(): x, data = sample_data() E01, t01_unit = data["E01"], data["t01_unit"]
constraint_num = E01 @ E01.T - skew(t01_unit) @ skew(t01_unit).T
5
2023-12-10 18:25:10+00:00
12k
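The test utilities in the example above validate each SDPA constraint by flattening the problem variables into a vector x, turning the constraint into a matrix A with sdpa2mat, and checking that the quadratic form x^T A x matches the values computed directly from the synthetic data. A minimal, self-contained numpy sketch of that quadratic-form check follows; the toy matrix A and the constraint a*b - 6 = 0 are made up for illustration and are not part of the repository:

import numpy as np

# Toy lifted vector x = [1, a, b]: with a homogenizing 1 in front, a single
# symmetric matrix A can encode constant, linear and quadratic terms.
a, b = 2.0, 3.0
x = np.array([1.0, a, b])[:, None]            # column vector, shape (3, 1)

# Encode the constraint a*b - 6 == 0 as x^T A x, splitting the bilinear
# term symmetrically across the off-diagonal entries.
A = np.array([
    [-6.0, 0.0, 0.0],
    [ 0.0, 0.0, 0.5],
    [ 0.0, 0.5, 0.0],
])

residual = (x.T @ A @ x).squeeze()
assert np.allclose(residual, 0.0), residual   # holds exactly for a = 2, b = 3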
Jack24658735/FedLGT
fed_main.py
[ { "identifier": "get_data", "path": "load_data.py", "snippet": "def get_data(args, curr_user=None):\n dataset = args.dataset\n data_root = args.dataroot\n batch_size = args.batch_size\n\n rescale = args.scale_size\n random_crop = args.crop_size\n attr_group_dict = args.attr_group_dict\n workers = args.workers\n n_groups = args.n_groups\n\n normTransform = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n scale_size = rescale\n crop_size = random_crop\n if args.test_batch_size == -1:\n args.test_batch_size = batch_size\n\n trainTransform = transforms.Compose([\n transforms.Resize((scale_size, scale_size)),\n transforms.Resize((crop_size, crop_size)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normTransform])\n\n testTransform = transforms.Compose([\n transforms.Resize((scale_size, scale_size)),\n transforms.CenterCrop(crop_size),\n transforms.ToTensor(),\n normTransform])\n\n test_dataset = None\n test_loader = None\n drop_last = False\n if dataset == 'coco':\n coco_root = os.path.join(data_root,'coco')\n ann_dir = os.path.join(coco_root,'annotations_pytorch')\n train_img_root = os.path.join(coco_root,'train2014')\n test_img_root = os.path.join(coco_root,'val2014')\n train_data_name = 'train.data'\n val_data_name = 'val_test.data'\n # Note: the val_test means the validation set and test set are combined\n # 20000 + 20504 = 40504 images\n \n train_dataset = Coco80Dataset(\n split='train',\n num_labels=args.num_labels,\n data_file=os.path.join(coco_root,train_data_name),\n img_root=train_img_root,\n annotation_dir=ann_dir,\n max_samples=args.max_samples,\n transform=trainTransform,\n known_labels=args.train_known_labels,\n testing=False)\n valid_dataset = None\n valid_loader = None\n test_dataset = Coco80Dataset(split='val',\n num_labels=args.num_labels,\n data_file=os.path.join(coco_root,val_data_name),\n img_root=test_img_root,\n annotation_dir=ann_dir,\n max_samples=args.max_samples,\n transform=testTransform,\n known_labels=args.test_known_labels,\n testing=True)\n elif dataset == 'coco1000':\n ann_dir = os.path.join(data_root,'coco','annotations_pytorch')\n data_dir = os.path.join(data_root,'coco')\n train_img_root = os.path.join(data_dir,'train2014')\n test_img_root = os.path.join(data_dir,'val2014')\n \n train_dataset = Coco1000Dataset(ann_dir, data_dir, split = 'train', transform = trainTransform,known_labels=args.train_known_labels,testing=False)\n valid_dataset = Coco1000Dataset(ann_dir, data_dir, split = 'val', transform = testTransform,known_labels=args.test_known_labels,testing=True)\n elif dataset == 'vg':\n vg_root = os.path.join(data_root,'VG')\n train_dir=os.path.join(vg_root,'VG_100K')\n train_list=os.path.join(vg_root,'train_list_500.txt')\n test_dir=os.path.join(vg_root,'VG_100K')\n test_list=os.path.join(vg_root,'test_list_500.txt')\n train_label=os.path.join(vg_root,'vg_category_500_labels_index.json')\n test_label=os.path.join(vg_root,'vg_category_500_labels_index.json')\n\n train_dataset = VGDataset(\n train_dir,\n train_list,\n trainTransform, \n train_label,\n known_labels=0,\n testing=False)\n \n valid_dataset = None\n valid_loader = None\n test_dataset = VGDataset(\n test_dir,\n test_list,\n testTransform,\n test_label,\n known_labels=args.test_known_labels,\n testing=True)\n \n elif dataset == 'news':\n drop_last=True\n ann_dir = '/bigtemp/jjl5sw/PartialMLC/data/bbc_data/'\n\n train_dataset = NewsDataset(ann_dir, split = 'train', transform = trainTransform,known_labels=0,testing=False)\n valid_dataset = 
NewsDataset(ann_dir, split = 'test', transform = testTransform,known_labels=args.test_known_labels,testing=True)\n \n elif dataset=='voc':\n voc_root = os.path.join(data_root,'voc/VOCdevkit/VOC2007/')\n img_dir = os.path.join(voc_root,'JPEGImages')\n anno_dir = os.path.join(voc_root,'Annotations')\n train_anno_path = os.path.join(voc_root,'ImageSets/Main/trainval.txt')\n test_anno_path = os.path.join(voc_root,'ImageSets/Main/test.txt')\n\n train_dataset = Voc07Dataset(\n img_dir=img_dir,\n anno_path=train_anno_path,\n image_transform=trainTransform,\n labels_path=anno_dir,\n known_labels=args.train_known_labels,\n testing=False,\n use_difficult=False)\n valid_dataset = None\n valid_loader = None\n # valid_dataset = Voc07Dataset(\n # img_dir=img_dir,\n # anno_path=test_anno_path,\n # image_transform=testTransform,\n # labels_path=anno_dir,\n # known_labels=args.test_known_labels,\n # testing=True)\n test_dataset = Voc07Dataset(\n img_dir=img_dir,\n anno_path=test_anno_path,\n image_transform=testTransform,\n labels_path=anno_dir,\n known_labels=args.test_known_labels,\n testing=True)\n\n elif dataset == 'cub':\n drop_last=True\n resol=299\n resized_resol = int(resol * 256/224)\n \n trainTransform = transforms.Compose([\n #transforms.Resize((resized_resol, resized_resol)),\n #transforms.RandomSizedCrop(resol),\n transforms.ColorJitter(brightness=32/255, saturation=(0.5, 1.5)),\n transforms.RandomResizedCrop(resol),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(), #implicitly divides by 255\n transforms.Normalize(mean = [0.5, 0.5, 0.5], std = [2, 2, 2])\n ])\n\n testTransform = transforms.Compose([\n #transforms.Resize((resized_resol, resized_resol)),\n transforms.CenterCrop(resol),\n transforms.ToTensor(), #implicitly divides by 255\n transforms.Normalize(mean = [0.5, 0.5, 0.5], std = [2, 2, 2])\n ])\n \n cub_root = os.path.join(data_root,'CUB_200_2011')\n image_dir = os.path.join(cub_root,'images')\n train_list = os.path.join(cub_root,'class_attr_data_10','train_valid.pkl')\n valid_list = os.path.join(cub_root,'class_attr_data_10','train_valid.pkl')\n test_list = os.path.join(cub_root,'class_attr_data_10','test.pkl')\n\n train_dataset = CUBDataset(image_dir, train_list, trainTransform,known_labels=args.train_known_labels,attr_group_dict=attr_group_dict,testing=False,n_groups=n_groups)\n valid_dataset = CUBDataset(image_dir, valid_list, testTransform,known_labels=args.test_known_labels,attr_group_dict=attr_group_dict,testing=True,n_groups=n_groups)\n test_dataset = CUBDataset(image_dir, test_list, testTransform,known_labels=args.test_known_labels,attr_group_dict=attr_group_dict,testing=True,n_groups=n_groups)\n elif dataset == 'flair':\n # TODO:\n # central: \n # data file has key: {'metadata', 'train', 'val', 'test'}\n # metadata: {label_counter, fine_grained_label_counter}\n # Note: use np.array() to read in \n # train: keys() contain all image IDs\n data_dir = os.path.join(data_root, 'flair')\n img_root = os.path.join(data_dir, 'data/small_images')\n label_mapping = None\n fg_label_mapping = None\n \n if args.flair_fine:\n with open(data_dir + '/fine_grained_label_mapping.json') as fg:\n fg_label_mapping = json.load(fg)\n else:\n with open(data_dir + '/label_mapping.json') as f:\n label_mapping = json.load(f)\n\n trainTransform = transforms.Compose([transforms.ToPILImage(),\n transforms.Resize((scale_size, scale_size)),\n # transforms.Resize((crop_size, crop_size)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normTransform])\n\n testTransform = 
transforms.Compose([transforms.ToPILImage(),\n transforms.Resize((scale_size, scale_size)),\n transforms.CenterCrop(crop_size),\n transforms.ToTensor(),\n normTransform])\n \n train_dataset = FlairDataset(split='train', \n num_labels=args.num_labels,\n data_file=data_dir,\n img_root=img_root,\n transform=trainTransform,\n label_mapping=label_mapping,\n fine_grained_label_mapping=fg_label_mapping,\n known_labels=args.train_known_labels)\n # modify this, maybe should re-run? (2023.1.13)\n valid_dataset = FlairDataset(split='val', \n num_labels=args.num_labels,\n data_file=data_dir,\n img_root=img_root,\n transform=testTransform,\n label_mapping=label_mapping,\n fine_grained_label_mapping=fg_label_mapping)\n test_dataset = FlairDataset(split='test', \n num_labels=args.num_labels,\n data_file=data_dir,\n img_root=img_root,\n transform=testTransform,\n label_mapping=label_mapping,\n fine_grained_label_mapping=fg_label_mapping)\n elif dataset == 'flair_fed':\n # TODO:\n # 1. sample user id (e.g., 200 users per round)\n # 2. for each user, build a model\n # ref: NIID-bench\n # build \"net_dataidx_map\" for each user (i.e. for each user, it has a dataidx list)\n # get_dataloader returns \"train/test_dl_local\"\n # Here, we build the dataset to allow the \"dataidx\"!\n data_dir = os.path.join(data_root, 'flair')\n img_root = os.path.join(data_dir, 'data/small_images')\n\n label_mapping = None\n fg_label_mapping = None\n \n if args.flair_fine:\n with open(data_dir + '/fine_grained_label_mapping.json') as fg:\n fg_label_mapping = json.load(fg)\n else:\n with open(data_dir + '/label_mapping.json') as f:\n label_mapping = json.load(f)\n trainTransform = transforms.Compose([transforms.ToPILImage(),\n transforms.Resize((scale_size, scale_size)),\n # transforms.Resize((crop_size, crop_size)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normTransform])\n\n testTransform = transforms.Compose([transforms.ToPILImage(),\n transforms.Resize((scale_size, scale_size)),\n transforms.CenterCrop(crop_size),\n transforms.ToTensor(),\n normTransform])\n \n \n inp_data = h5py.File('/media/liujack/flair_hdf5/fl_data.hdf5', 'r')\n train_dataset = None\n\n\n if curr_user != None:\n train_dataset = FlairFedDataset(inp_data=inp_data,\n split='train', \n num_labels=args.num_labels,\n data_file=data_dir,\n img_root=img_root,\n curr_user=curr_user,\n transform=trainTransform,\n label_mapping=label_mapping,\n fine_grained_label_mapping=fg_label_mapping,\n known_labels=args.train_known_labels)\n else:\n train_dataset = inp_data\n # client agnoistic dataset\n valid_dataset = FlairDataset(split='val', \n num_labels=args.num_labels,\n data_file=data_dir,\n img_root=img_root,\n transform=testTransform,\n label_mapping=label_mapping,\n fine_grained_label_mapping=fg_label_mapping)\n # client agnoistic dataset\n test_dataset = FlairDataset(split='test', \n num_labels=args.num_labels,\n data_file=data_dir,\n img_root=img_root,\n transform=testTransform,\n label_mapping=label_mapping,\n fine_grained_label_mapping=fg_label_mapping)\n\n else:\n print('no dataset avail')\n exit(0)\n\n if train_dataset is not None:\n train_loader = DataLoader(train_dataset, batch_size=batch_size,shuffle=True, num_workers=workers,drop_last=drop_last)\n if valid_dataset is not None:\n valid_loader = DataLoader(valid_dataset, batch_size=args.test_batch_size,shuffle=False, num_workers=workers)\n if test_dataset is not None:\n test_loader = DataLoader(test_dataset, batch_size=args.test_batch_size,shuffle=False, num_workers=workers)\n if 
dataset in ['flair_fed']:\n return train_loader, valid_loader, test_loader, train_dataset\n return train_loader,valid_loader,test_loader" }, { "identifier": "CTranModel", "path": "models/CTran.py", "snippet": "class CTranModel(nn.Module):\n def __init__(self,num_labels,use_lmt,pos_emb=False,layers=3,heads=4,dropout=0.1,int_loss=0,no_x_features=False, state_weight=None, label_weight=None):\n super(CTranModel, self).__init__()\n self.use_lmt = use_lmt\n \n self.no_x_features = no_x_features # (for no image features)\n\n # ResNet backbone\n self.backbone = Backbone()\n # self.backbone_c = BackboneCLIP()\n \n hidden = 512 # this should match the backbone output feature size\n\n self.downsample = False\n if self.downsample:\n self.conv_downsample = torch.nn.Conv2d(hidden,hidden,(1,1))\n \n # Label Embeddings\n self.label_input = torch.Tensor(np.arange(num_labels)).view(1,-1).long()\n self.label_lt = torch.nn.Embedding(num_labels, hidden, padding_idx=None)\n self.clip_label_lt = nn.Embedding.from_pretrained(label_weight, freeze=True, padding_idx=None)\n # State Embeddings\n self.known_label_lt = nn.Embedding.from_pretrained(state_weight, freeze=True, padding_idx=0)\n # self.known_label_lt = torch.nn.Embedding(3, hidden, padding_idx=0)\n\n # Position Embeddings (for image features)\n self.use_pos_enc = pos_emb\n if self.use_pos_enc:\n # self.position_encoding = PositionEmbeddingSine(int(hidden/2), normalize=True)\n self.position_encoding = positionalencoding2d(hidden, 18, 18).unsqueeze(0)\n\n # Transformer\n self.self_attn_layers = nn.ModuleList([SelfAttnLayer(hidden,heads,dropout) for _ in range(layers)])\n\n # Classifier\n # Output is of size num_labels because we want a separate classifier for each label\n \n self.output_linear = torch.nn.Linear(hidden,num_labels)\n\n # Other\n self.LayerNorm = nn.LayerNorm(hidden)\n self.dropout = nn.Dropout(dropout)\n\n # Init all except pretrained backbone\n self.label_lt.apply(weights_init)\n # below is just for c_tran original\n # self.known_label_lt.apply(weights_init)\n self.LayerNorm.apply(weights_init)\n self.self_attn_layers.apply(weights_init)\n self.output_linear.apply(weights_init)\n\n # only use backbone\n self.is_only_backbone = False\n self.use_ml_head = False\n self.decoder = MLDecoder(num_classes=num_labels, decoder_embedding=512, initial_num_features=512)\n\n\n def forward(self, images, mask, label_emb_type='ctran', clip_emb=None, clip_model=None):\n\n # decide the label embedding is learnable or not\n if label_emb_type == 'ctran':\n const_label_input = self.label_input.repeat(images.size(0),1).cuda()\n label_init_emb = self.label_lt(const_label_input)\n elif label_emb_type == 'onehot':\n const_label_input = F.one_hot(torch.arange(0, 17)) # (0~num_labels)\n label_init_emb = F.pad(const_label_input, pad=(0, 512 - const_label_input.shape[0], 0, 0)).unsqueeze(0)\n label_init_emb = torch.Tensor(label_init_emb).long().cuda()\n elif label_emb_type == 'clip':\n const_label_input = self.label_input.repeat(images.size(0),1).cuda()\n label_init_emb = self.clip_label_lt(const_label_input) \n \n features = self.backbone(images)\n if self.downsample:\n features = self.conv_downsample(features)\n if self.use_pos_enc:\n pos_encoding = self.position_encoding(features,torch.zeros(features.size(0),18,18, dtype=torch.bool).cuda())\n features = features + pos_encoding\n\n features = features.view(features.size(0),features.size(1),-1).permute(0,2,1) \n\n # Convert mask values to positive integers for nn.Embedding\n label_feat_vec = 
custom_replace(mask,0,1,2).long()\n\n # Get state embeddings\n state_embeddings = self.known_label_lt(label_feat_vec)\n init_label_embeddings = label_init_emb + state_embeddings\n \n if self.no_x_features:\n embeddings = init_label_embeddings \n else:\n embeddings = torch.cat((features, init_label_embeddings),1)\n # Feed image and label embeddings through Transformer\n embeddings = self.LayerNorm(embeddings)\n attns = []\n if not self.is_only_backbone:\n for layer in self.self_attn_layers:\n embeddings,attn = layer(embeddings,mask=None)\n attns += attn.detach().unsqueeze(0).data\n\n # Readout each label embedding using a linear layer\n # (1, 17, 512)\n label_embeddings = embeddings[:,-init_label_embeddings.size(1):,:]\n tmp_emb = embeddings[:,init_label_embeddings.size(1):,:]\n # Different decoder input?\n ## (1) resnet + label embedding out\n ## (2) only label embedding perform self-attn => not better than (1)\n ## (3) embedding out directly from encoder (visual + label emb) => best now\n if self.use_ml_head:\n for i in range(label_embeddings.shape[0]):\n if i == 0:\n output = self.decoder(tmp_emb[i].unsqueeze(0), label_embeddings[i].unsqueeze(0))\n else:\n output = torch.cat((output, self.decoder(tmp_emb[i].unsqueeze(0), label_embeddings[i].unsqueeze(0))))\n else:\n # (1, 17, 17)\n output = self.output_linear(label_embeddings) \n diag_mask = torch.eye(output.size(1)).unsqueeze(0).repeat(output.size(0),1,1).cuda()\n output = (output*diag_mask).sum(-1)\n \n return output,None,attns" }, { "identifier": "get_args", "path": "config_args.py", "snippet": "def get_args(parser,eval=False):\n parser.add_argument('--dataroot', type=str, default='./data/')\n parser.add_argument('--dataset', type=str, choices=['coco', 'voc','coco1000','nus','vg','news','cub', 'flair', 'flair_fed'], default='coco')\n ### change default by myself\n parser.add_argument('--workers', type=int, default=1)\n\n parser.add_argument('--results_dir', type=str, default='results/')\n parser.add_argument('--test_known', type=int, default=0)\n\n # Optimization\n parser.add_argument('--optim', type=str, choices=['adam', 'sgd', 'adamw'], default='adam')\n parser.add_argument('--lr', type=float, default=0.0002)\n parser.add_argument('--batch_size', type=int, default=32)\n parser.add_argument('--test_batch_size', type=int, default=-1)\n parser.add_argument('--grad_ac_steps', type=int, default=1)\n parser.add_argument('--scheduler_step', type=int, default=1000)\n parser.add_argument('--scheduler_gamma', type=float, default=0.1)\n parser.add_argument('--epochs', type=int, default=100)\n parser.add_argument('--int_loss', type=float, default=0.0)\n parser.add_argument('--aux_loss', type=float, default=0.0)\n parser.add_argument('--loss_type', type=str, choices=['bce', 'mixed','class_ce','soft_margin'], default='bce')\n parser.add_argument('--scheduler_type', type=str, choices=['plateau', 'step'], default='plateau')\n parser.add_argument('--loss_labels', type=str, choices=['all', 'unk'], default='all')\n parser.add_argument('--lr_decay', type=float, default=0)\n parser.add_argument('--weight_decay', type=float, default=1e-4)\n parser.add_argument('--max_samples', type=int, default=-1)\n parser.add_argument('--max_batches', type=int, default=-1)\n parser.add_argument('--warmup_scheduler', action='store_true',help='')\n parser.add_argument('--rho', type=float, default=0, help='Parameter controlling the momentum SGD')\n\n\n # Model\n parser.add_argument('--layers', type=int, default=3)\n parser.add_argument('--heads', type=int, default=4)\n 
parser.add_argument('--dropout', type=float, default=0.1)\n parser.add_argument('--pos_emb', action='store_true',help='positional encoding') \n parser.add_argument('--use_lmt', dest='use_lmt', action='store_true',help='label mask training') \n parser.add_argument('--freeze_backbone', action='store_true')\n parser.add_argument('--no_x_features', action='store_true')\n\n # CUB\n parser.add_argument('--attr_group_dict', type=str, default='')\n \n parser.add_argument('--n_groups', type=int, default=10,help='groups for CUB test time intervention')\n\n # FLAIR\n parser.add_argument('--flair_fine', action='store_true', help='whether use the fine-grained labels defined in FLAIR.')\n \n # Image Sizes\n # change the default values for FLAIR\n parser.add_argument('--scale_size', type=int, default=256)\n parser.add_argument('--crop_size', type=int, default=256)\n\n # Testing Models\n parser.add_argument('--inference', action='store_true')\n parser.add_argument('--resume', action='store_true')\n parser.add_argument('--saved_model_name', type=str, default='')\n \n parser.add_argument('--overwrite', action='store_true')\n parser.add_argument('--name', type=str, default='')\n\n # FL setting\n # TODO:\n parser.add_argument('--is_same_initial', type=int, default=1, help='Whether initial all the models with the same parameters in fedavg')\n parser.add_argument('--n_parties', type=int, default=20, help='number of workers in a distributed cluster')\n parser.add_argument('--comm_round', type=int, default=50, help='number of maximum communication round')\n parser.add_argument('--device', type=str, default='cuda:0', help='The device to run the program')\n parser.add_argument('--init_seed', type=int, default=514, help=\"Random seed\")\n parser.add_argument('--ckpt_path', type=str, default='', help='The path to the trained model (for inference usage)')\n\n\n # learnable embedding\n parser.add_argument('--learn_emb_type', type=str, choices=['ctran', 'onehot', 'clip'], default='ctran')\n parser.add_argument('--use_global_guide', action='store_true')\n parser.add_argument('--use_only_CLIP_visual', action='store_true')\n\n parser.add_argument('--alg', type=str, default='fedavg',\n help='fl algorithms: fedavg/fedprox/scaffold/fednova/moon')\n # visualize setting\n parser.add_argument('--visualize', action='store_true')\n\n # how to build coarse level CLIP embedding\n parser.add_argument('--coarse_prompt_type', type=str, choices=['avg', 'concat'], default='concat')\n # aggregation strategies\n parser.add_argument('--agg_type', type=str, choices=['fedavg', 'loss'], default='fedavg')\n # parser.add_argument('--sample', type=float, default=0.005, help='Sample ratio for each communication round')\n args = parser.parse_args()\n model_name = args.dataset\n if args.dataset == 'voc':\n args.num_labels = 20\n elif args.dataset == 'nus':\n args.num_labels = 1000\n elif args.dataset == 'coco1000':\n args.num_labels = 1000\n elif args.dataset == 'coco':\n args.num_labels = 80\n elif args.dataset == 'vg':\n args.num_labels = 500\n elif args.dataset == 'news':\n args.num_labels = 500\n elif args.dataset == 'cub':\n args.num_labels = 112\n # add FLAIR dataset \n elif args.dataset == 'flair' or args.dataset == 'flair_fed':\n if args.flair_fine:\n args.num_labels = 1628\n else:\n args.num_labels = 17\n else:\n print('dataset not included')\n exit()\n \n\n model_name += '.'+str(args.layers)+'layer'\n model_name += '.bsz_{}'.format(int(args.batch_size * args.grad_ac_steps))\n model_name += '.'+args.optim+str(args.lr)#.split('.')[1]\n if 
args.dataset == 'flair_fed':\n model_name += '.'+str(args.comm_round)+'round'\n print(f'Current embedding use:{args.learn_emb_type}')\n if args.learn_emb_type == 'ctran':\n model_name += '.ctran_emb'\n elif args.learn_emb_type == 'onehot':\n model_name += '.onehot_emb'\n elif args.learn_emb_type == 'clip':\n model_name += '.clip_emb'\n else:\n print('embedding setting is not included')\n exit()\n\n if args.use_global_guide:\n model_name += '.global_guide'\n \n if args.alg == 'fedavg':\n pass\n elif args.alg == 'fedprox':\n model_name += '.fedprox'\n else:\n print('FL setting is not implemented now')\n exit()\n\n if args.use_only_CLIP_visual:\n model_name += '.use_only_CLIP_visual'\n\n if args.agg_type == 'fedavg':\n model_name += 'agg_avg'\n elif args.agg_type == 'loss':\n model_name += 'agg_loss'\n else:\n print('FL setting is not included')\n exit()\n \n if args.coarse_prompt_type == 'avg':\n model_name += 'coarse_prompt_avg'\n elif args.coarse_prompt_type == 'concat':\n model_name += 'coarse_prompt_concat'\n else:\n print('FL setting is not included')\n exit()\n\n if args.use_lmt:\n model_name += '.lmt'\n args.loss_labels = 'unk'\n model_name += '.unk_loss'\n args.train_known_labels = 100\n else:\n args.train_known_labels = 0\n\n\n if args.pos_emb:\n model_name += '.pos_emb'\n\n if args.int_loss != 0.0:\n model_name += '.int_loss'+str(args.int_loss).split('.')[1]\n\n if args.aux_loss != 0.0:\n model_name += '.aux_loss'+str(args.aux_loss).replace('.','')\n\n if args.no_x_features:\n model_name += '.no_x_features'\n \n args.test_known_labels = int(args.test_known*0.01*args.num_labels)\n\n if args.dataset == 'cub':\n # reset the TOTAL number of labels to be concepts+classes\n model_name += '.step_{}'.format(args.scheduler_step)\n\n model_name += '.'+args.loss_type+'_loss'\n args.num_labels = 112+200\n\n args.attr_group_dict = {0: [0, 1, 2, 3], 1: [4, 5, 6, 7, 8, 9], 2: [10, 11, 12, 13, 14, 15], 3: [16, 17, 18, 19, 20, 21], 4: [22, 23, 24], 5: [25, 26, 27, 28, 29, 30], 6: [31], 7: [32, 33, 34, 35, 36], 8: [37, 38], 9: [39, 40, 41, 42, 43, 44], 10: [45, 46, 47, 48, 49], 11: [50], 12: [51, 52], 13: [53, 54, 55, 56, 57, 58], 14: [59, 60, 61, 62, 63], 15: [64, 65, 66, 67, 68, 69], 16: [70, 71, 72, 73, 74, 75], 17: [76, 77], 18: [78, 79, 80], 19: [81, 82], 20: [83, 84, 85], 21: [86, 87, 88], 22: [89], 23: [90, 91, 92, 93, 94, 95], 24: [96, 97, 98], 25: [99, 100, 101], 26: [102, 103, 104, 105, 106, 107], 27: [108, 109, 110, 111]}\n\n if args.flair_fine:\n model_name += '.fine_grained'\n \n if args.dataset == 'flair_fed':\n model_name += f'.client={args.n_parties}'\n\n if args.name != '':\n model_name += '.'+args.name\n \n if not os.path.exists(args.results_dir):\n os.makedirs(args.results_dir)\n \n model_name = os.path.join(args.results_dir,model_name)\n \n args.model_name = model_name\n\n\n if args.inference:\n args.epochs = 1\n\n \n if os.path.exists(args.model_name) and (not args.overwrite) and (not 'test' in args.name) and (not eval) and (not args.inference) and (not args.resume):\n print(args.model_name)\n overwrite_status = input('Already Exists. 
Overwrite?: ')\n if overwrite_status == 'rm':\n os.system('rm -rf '+args.model_name)\n elif not 'y' in overwrite_status:\n exit(0)\n elif not os.path.exists(args.model_name):\n os.makedirs(args.model_name)\n\n\n return args" }, { "identifier": "WarmupLinearSchedule", "path": "optim_schedule.py", "snippet": "class WarmupLinearSchedule(LambdaLR):\n \"\"\" Linear warmup and then linear decay.\n Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps.\n Linearly decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps`\n steps.\n \"\"\"\n def __init__(self, optimizer, warmup_steps, t_total, last_epoch=-1):\n self.warmup_steps = warmup_steps\n self.t_total = t_total\n super(WarmupLinearSchedule, self).__init__(\n optimizer, self.lr_lambda, last_epoch=last_epoch)\n\n def lr_lambda(self, step):\n if step < self.warmup_steps:\n return float(step) / float(max(1, self.warmup_steps))\n return max(0.0, float(self.t_total - step) / float(\n max(1.0, self.t_total - self.warmup_steps)))" }, { "identifier": "run_epoch", "path": "run_epoch.py", "snippet": "def run_epoch(args,model,data,optimizer,epoch,desc,train=False,warmup_scheduler=None, global_model=None, emb_feat=None, clip_model=None, tau=None):\n if train:\n model.train()\n optimizer.zero_grad()\n else:\n model.eval()\n\n # pre-allocate full prediction and target tensors\n all_predictions = torch.zeros(len(data.dataset),args.num_labels).cpu()\n all_targets = torch.zeros(len(data.dataset),args.num_labels).cpu()\n all_masks = torch.zeros(len(data.dataset),args.num_labels).cpu()\n all_image_ids = []\n batch_idx = 0\n loss_total = 0\n unk_loss_total = 0\n if train:\n if args.dataset == 'flair_fed' or args.dataset == 'coco' or args.dataset == 'voc':\n data_loader = data\n else:\n data_loader = tqdm(data,mininterval=0.5,desc=desc,leave=True,ncols=100)\n else:\n data_loader = tqdm(data,mininterval=0.5,desc=desc,leave=True,ncols=100)\n for batch in data_loader:\n \n\n labels = batch['labels'].float()\n images = batch['image'].float()\n mask = batch['mask'].float()\n \n # Original setting\n mask_in = mask.clone()\n if args.use_global_guide and train:\n with torch.no_grad():\n mask_g = mask_in.clone()\n for idx, m in enumerate(mask_g[0]):\n mask_g[0][idx] = -1.\n global_pred,_,_ = global_model(images.cuda(),mask_g.cuda(), args.learn_emb_type, emb_feat, clip_model)\n global_pred = global_pred.data.cpu()\n # print(global_pred.shape)\n # print(global_pred)\n global_logits = F.sigmoid(global_pred)\n \n # TODO: (for rebuttal) global pred. 
masking \n for idx, m in enumerate(mask_in[0]):\n if 0.48 <= global_logits[0][idx].item() <= 0.52:\n # mask this\n mask_in[0][idx] = -1.\n \n # mask -1, 0, 1 -> assigned become 1, 0, 0\n unk_mask = custom_replace(mask_in,1,0,0)\n all_image_ids += batch['imageIDs']\n\n ### TODO: CLIP\n # idea 1: label text to replace the label embedding in c_tran => there is a \"???\" in the scene\n # idea 2: [prompt] [label_text] => can be tuned\n if train:\n pred,int_pred,attns = model(images.cuda(),mask_in.cuda(), args.learn_emb_type, emb_feat, clip_model)\n else:\n for idx, m in enumerate(mask_in[0]):\n mask_in[0][idx] = -1.\n with torch.no_grad():\n pred,int_pred,attns = model(images.cuda(),mask_in.cuda(), args.learn_emb_type, emb_feat, clip_model)\n\n if args.dataset == 'cub':\n class_label = batch['class_label'].float()\n concept_certainty = batch['concept_certainty'].float()\n\n class_label_onehot = torch.zeros(class_label.size(0),200)\n class_label_onehot.scatter_(1,class_label.long(),1)\n\n labels = torch.cat((labels,class_label_onehot),1)\n loss = F.binary_cross_entropy_with_logits(pred.view(labels.size(0),-1),labels.cuda(),reduction='none')\n loss = (unk_mask.cuda()*loss).sum()/unk_mask.detach().sum().item()\n\n aux_loss = F.binary_cross_entropy_with_logits(int_pred.view(labels.size(0),-1),labels.cuda(),reduction='none')\n aux_loss = (unk_mask.cuda()*aux_loss).sum()/unk_mask.detach().sum().item()\n\n loss_out = 1.0*loss + float(args.aux_loss)*aux_loss\n loss = loss_out\n\n else:\n # TODO: (1) change to focal loss\n # TODO: (2) change to ASL\n loss = F.binary_cross_entropy_with_logits(pred.view(labels.size(0),-1),labels.cuda(),reduction='none')\n # loss = sigmoid_focal_loss(pred.view(labels.size(0),-1), labels.cuda(), alpha=0.005, gamma=5, reduction=None)\n # cri = AsymmetricLoss()\n if args.loss_labels == 'unk': \n # only use unknown labels for loss\n loss_out = (unk_mask.cuda()*loss).sum()\n else: \n # use all labels for loss\n loss_out = loss.sum() \n\n if train:\n # (FedProx): add proximal term\n if args.alg == 'fedprox':\n global_weight_collector = list(global_model.parameters())\n mu = 0.001\n #for fedprox\n fed_prox_reg = 0.0\n for param_index, param in enumerate(model.parameters()):\n fed_prox_reg += ((mu / 2) * torch.norm((param - global_weight_collector[param_index]))**2)\n loss_out += fed_prox_reg\n loss_out.backward()\n # Grad Accumulation\n if ((batch_idx + 1) % args.grad_ac_steps == 0):\n torch.nn.utils.clip_grad_norm_(parameters=model.parameters(), max_norm=10.0, norm_type=2)\n optimizer.step()\n optimizer.zero_grad()\n if warmup_scheduler is not None:\n warmup_scheduler.step()\n ## Updates ##\n loss_total += loss_out.item()\n unk_loss_total += loss_out.item()\n start_idx,end_idx=(batch_idx*data.batch_size),((batch_idx+1)*data.batch_size)\n \n if pred.size(0) != all_predictions[start_idx:end_idx].size(0):\n pred = pred.view(labels.size(0),-1)\n \n all_predictions[start_idx:end_idx] = pred.data.cpu()\n all_targets[start_idx:end_idx] = labels.data.cpu()\n\n all_masks[start_idx:end_idx] = mask_in.data.cpu()\n\n batch_idx += 1\n if args.dataset == 'flair':\n data_loader.set_description(f'Testing')\n data_loader.set_postfix(loss=f'{loss_total / (batch_idx + 1):.4f}')\n elif args.dataset == 'flair_fed' or args.dataset == 'coco' or args.dataset == 'voc':\n if not train:\n data_loader.set_description(f'Testing')\n data_loader.set_postfix(loss=f'{loss_total / (batch_idx + 1):.4f}')\n \n\n loss_total = loss_total/float(all_predictions.size(0))\n unk_loss_total = 
unk_loss_total/float(all_predictions.size(0))\n\n return all_predictions,all_targets,all_masks,all_image_ids,loss_total,unk_loss_total" } ]
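The WarmupLinearSchedule snippet in the context above wraps a plain step-dependent multiplier in a LambdaLR; extracting that lr_lambda makes it easy to sanity-check the shape of the schedule. The defaults warmup_steps=1 and t_total=300000 mirror the values hard-coded in train_net further down; this standalone sketch is only for inspection and is not part of the repository:

def lr_lambda(step, warmup_steps=1, t_total=300_000):
    # Linear ramp from 0 to 1 over the warmup phase...
    if step < warmup_steps:
        return float(step) / float(max(1, warmup_steps))
    # ...then linear decay from 1 back to 0 over the remaining steps.
    return max(0.0, float(t_total - step) / float(max(1.0, t_total - warmup_steps)))

# Multiplier applied to the base learning rate at a few milestones.
for s in (0, 1, 150_000, 300_000):
    print(s, round(lr_lambda(s), 4))          # 0.0, 1.0, ~0.5, 0.0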
import torch import argparse import numpy as np import utils.evaluate as evaluate import utils.logger as logger import logging import datetime import os import random import clip import json from load_data import get_data from models import CTranModel from config_args import get_args from optim_schedule import WarmupLinearSchedule from run_epoch import run_epoch from tqdm import tqdm from scipy.special import softmax
10,388
def init_nets(args, is_global=False, state_weight=None, label_weight=None): if is_global: n_parties = 1 else: n_parties = args.n_parties nets = {net_i: None for net_i in range(n_parties)} ### FLAIR for net_i in range(n_parties): model = CTranModel(args.num_labels,args.use_lmt,args.pos_emb,args.layers,args.heads,args.dropout,args.no_x_features, state_weight=state_weight, label_weight=label_weight) nets[net_i] = model model_meta_data = [] layer_type = [] for (k, v) in nets[0].state_dict().items(): model_meta_data.append(v.shape) layer_type.append(k) return nets, model_meta_data, layer_type def local_train_net(nets, args, u_id, test_dl = None, device="cpu", g_model=None, emb_feat=None, clip_model=None): data_pts = 0 net_dataidx_map = {} loss_based_agg_list = [] for net_id, net in nets.items(): net.to(device) # TODO: for COCO-dataset, just use indexing of the original dataset to have new subset dataset # TODO: VOC dataset is similar if args.dataset == 'coco' or args.dataset == 'voc': sub_dst = torch.utils.data.Subset(train_dl_global.dataset, partition_idx_map[net_id]) train_dl_local = torch.utils.data.DataLoader(sub_dst, batch_size=args.batch_size,shuffle=True, num_workers=args.workers,drop_last=False) net_dataidx_map[net_id] = len(sub_dst) data_pts += len(sub_dst) else: train_dl_local, test_dl, _, train_dataset = get_data(args, curr_user=u_id[net_id]) # for fedavg net_dataidx_map[net_id] = len(train_dataset) data_pts += len(train_dataset) n_epoch = args.epochs train_metrics, testacc = train_net(net_id, net, train_dl_local, test_dl, n_epoch, args, device=device, g_model=g_model, emb_feat=emb_feat, clip_model=clip_model) # for loss-based agg. loss_based_agg_list.append(train_metrics['loss']) return data_pts, net_dataidx_map, loss_based_agg_list def train_net(net_id, model, train_dataloader, valid_dataloader, epochs, args, device="cpu", g_model=None, emb_feat=None, clip_model=None): fl_logger.info('Training network %s' % str(net_id)) loss_logger = logger.LossLogger(args.model_name) if args.optim == 'adam': optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),lr=args.lr)#, weight_decay=0.0004) elif args.optim == 'adamw': optimizer = torch.optim.AdamW(filter(lambda p: p.requires_grad, model.parameters()),lr=args.lr) else: optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, momentum=0.9, weight_decay=1e-4) if args.warmup_scheduler: step_scheduler = None scheduler_warmup = WarmupLinearSchedule(optimizer, 1, 300000) else: scheduler_warmup = None if args.scheduler_type == 'plateau': step_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,mode='min',factor=0.1,patience=5) elif args.scheduler_type == 'step': step_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.scheduler_step, gamma=args.scheduler_gamma) else: step_scheduler = None test_loader = None for epoch in range(epochs): all_preds, all_targs, all_masks, all_ids, train_loss, train_loss_unk = run_epoch(args,model,train_dataloader,optimizer,epoch,'Training',train=True,warmup_scheduler=scheduler_warmup,global_model=g_model,emb_feat=emb_feat, clip_model=clip_model) train_metrics = evaluate.compute_metrics(args,all_preds,all_targs,all_masks,train_loss,train_loss_unk,0,args.train_known_labels, verbose=False) loss_logger.log_losses('train.log',epoch,train_loss,train_metrics,train_loss_unk) if step_scheduler is not None: if args.scheduler_type == 'step': step_scheduler.step(epoch) elif args.scheduler_type == 'plateau': 
step_scheduler.step(train_loss_unk) fl_logger.info(f'{train_metrics["mAP"]}, {train_metrics["CF1"]}, {train_metrics["loss"]:.3f}') test_acc = 0 fl_logger.info(' ** Training complete **') return train_metrics, test_acc if __name__ == '__main__':
def init_nets(args, is_global=False, state_weight=None, label_weight=None): if is_global: n_parties = 1 else: n_parties = args.n_parties nets = {net_i: None for net_i in range(n_parties)} ### FLAIR for net_i in range(n_parties): model = CTranModel(args.num_labels,args.use_lmt,args.pos_emb,args.layers,args.heads,args.dropout,args.no_x_features, state_weight=state_weight, label_weight=label_weight) nets[net_i] = model model_meta_data = [] layer_type = [] for (k, v) in nets[0].state_dict().items(): model_meta_data.append(v.shape) layer_type.append(k) return nets, model_meta_data, layer_type def local_train_net(nets, args, u_id, test_dl = None, device="cpu", g_model=None, emb_feat=None, clip_model=None): data_pts = 0 net_dataidx_map = {} loss_based_agg_list = [] for net_id, net in nets.items(): net.to(device) # TODO: for COCO-dataset, just use indexing of the original dataset to have new subset dataset # TODO: VOC dataset is similar if args.dataset == 'coco' or args.dataset == 'voc': sub_dst = torch.utils.data.Subset(train_dl_global.dataset, partition_idx_map[net_id]) train_dl_local = torch.utils.data.DataLoader(sub_dst, batch_size=args.batch_size,shuffle=True, num_workers=args.workers,drop_last=False) net_dataidx_map[net_id] = len(sub_dst) data_pts += len(sub_dst) else: train_dl_local, test_dl, _, train_dataset = get_data(args, curr_user=u_id[net_id]) # for fedavg net_dataidx_map[net_id] = len(train_dataset) data_pts += len(train_dataset) n_epoch = args.epochs train_metrics, testacc = train_net(net_id, net, train_dl_local, test_dl, n_epoch, args, device=device, g_model=g_model, emb_feat=emb_feat, clip_model=clip_model) # for loss-based agg. loss_based_agg_list.append(train_metrics['loss']) return data_pts, net_dataidx_map, loss_based_agg_list def train_net(net_id, model, train_dataloader, valid_dataloader, epochs, args, device="cpu", g_model=None, emb_feat=None, clip_model=None): fl_logger.info('Training network %s' % str(net_id)) loss_logger = logger.LossLogger(args.model_name) if args.optim == 'adam': optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),lr=args.lr)#, weight_decay=0.0004) elif args.optim == 'adamw': optimizer = torch.optim.AdamW(filter(lambda p: p.requires_grad, model.parameters()),lr=args.lr) else: optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, momentum=0.9, weight_decay=1e-4) if args.warmup_scheduler: step_scheduler = None scheduler_warmup = WarmupLinearSchedule(optimizer, 1, 300000) else: scheduler_warmup = None if args.scheduler_type == 'plateau': step_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,mode='min',factor=0.1,patience=5) elif args.scheduler_type == 'step': step_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.scheduler_step, gamma=args.scheduler_gamma) else: step_scheduler = None test_loader = None for epoch in range(epochs): all_preds, all_targs, all_masks, all_ids, train_loss, train_loss_unk = run_epoch(args,model,train_dataloader,optimizer,epoch,'Training',train=True,warmup_scheduler=scheduler_warmup,global_model=g_model,emb_feat=emb_feat, clip_model=clip_model) train_metrics = evaluate.compute_metrics(args,all_preds,all_targs,all_masks,train_loss,train_loss_unk,0,args.train_known_labels, verbose=False) loss_logger.log_losses('train.log',epoch,train_loss,train_metrics,train_loss_unk) if step_scheduler is not None: if args.scheduler_type == 'step': step_scheduler.step(epoch) elif args.scheduler_type == 'plateau': 
step_scheduler.step(train_loss_unk) fl_logger.info(f'{train_metrics["mAP"]}, {train_metrics["CF1"]}, {train_metrics["loss"]:.3f}') test_acc = 0 fl_logger.info(' ** Training complete **') return train_metrics, test_acc if __name__ == '__main__':
args = get_args(argparse.ArgumentParser())
2
2023-12-09 09:16:59+00:00
12k
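local_train_net above returns the total sample count, a per-client dataset-size map and a per-client loss list, which is exactly the bookkeeping needed for weighted model aggregation. The aggregation itself happens in the __main__ block that is not shown here; the sketch below is a generic data-size-weighted FedAvg average of client state_dicts, consistent with that bookkeeping but not necessarily identical to the aggregation used in fed_main.py:

import torch

def fedavg_aggregate(nets, net_dataidx_map, data_pts):
    """Average client state_dicts, weighting each client by its local dataset size."""
    weights = {net_id: net_dataidx_map[net_id] / float(data_pts) for net_id in nets}
    template = next(iter(nets.values())).state_dict()
    global_state = {k: torch.zeros_like(v, dtype=torch.float32) for k, v in template.items()}
    for net_id, net in nets.items():
        for k, v in net.state_dict().items():
            global_state[k] += weights[net_id] * v.float()
    return global_state

# e.g. global_model.load_state_dict(fedavg_aggregate(nets, net_dataidx_map, data_pts))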
AgriCodeHub/dairy-django-backend
tests/health/tests/conftest.py
[ { "identifier": "CowAvailabilityChoices", "path": "core/choices.py", "snippet": "class CowAvailabilityChoices(models.TextChoices):\n \"\"\"\n Choices for the availability status of a cow.\n\n Choices:\n - `ALIVE`: Cow is alive and active.\n - `SOLD`: Cow has been sold.\n - `DEAD`: Cow has died.\n\n Usage:\n These choices represent the availability status of a cow in the Cow model.\n Use these choices when defining or querying Cow instances to represent the current status of a cow.\n\n Example:\n ```\n class Cow(models.Model):\n availability_status = models.CharField(max_length=50, choices=CowAvailabilityChoices.choices)\n ```\n \"\"\"\n\n ALIVE = \"Alive\"\n SOLD = \"Sold\"\n DEAD = \"Dead\"" }, { "identifier": "CowBreedChoices", "path": "core/choices.py", "snippet": "class CowBreedChoices(models.TextChoices):\n \"\"\"\n Enumeration of choices for representing different cow breeds.\n\n Choices:\n - `FRIESIAN`: Represents the Friesian cow breed.\n - `SAHIWAL`: Represents the Sahiwal cow breed.\n - `JERSEY`: Represents the Jersey cow breed.\n - `GUERNSEY`: Represents the Guernsey cow breed.\n - `CROSSBREED`: Represents a crossbreed of cows.\n - `AYRSHIRE`: Represents the Ayrshire cow breed.\n\n Usage:\n This enumeration provides predefined choices for the cow breed field in the CowBreed model.\n Use these choices when defining or querying CowBreed instances to represent specific cow breeds.\n\n Example:\n ```\n class CowBreed(models.Model):\n name = models.CharField(max_length=50, choices=CowBreedChoices.choices)\n ```\n\n \"\"\"\n\n FRIESIAN = \"Friesian\"\n SAHIWAL = \"Sahiwal\"\n JERSEY = \"Jersey\"\n GUERNSEY = \"Guernsey\"\n CROSSBREED = \"Crossbreed\"\n AYRSHIRE = \"Ayrshire\"" }, { "identifier": "CowCategoryChoices", "path": "core/choices.py", "snippet": "class CowCategoryChoices(models.TextChoices):\n \"\"\"\n Choices for the category of a cow.\n\n Choices:\n - `CALF`: Represents a calf.\n - `WEANER`: Represents a weaner.\n - `HEIFER`: Represents a heifer.\n - `BULL`: Represents a bull.\n - `MILKING_COW`: Represents a milking cow.\n\n Usage:\n These choices represent the category of a cow in the Cow model.\n Use these choices when defining or querying Cow instances to represent the category of a cow.\n\n Example:\n ```\n class Cow(models.Model):\n category = models.CharField(max_length=15, choices=CowCategoryChoices.choices)\n ```\n \"\"\"\n\n CALF = \"Calf\"\n WEANER = \"Weaner\"\n HEIFER = \"Heifer\"\n BULL = \"Bull\"\n MILKING_COW = \"Milking Cow\"" }, { "identifier": "CowPregnancyChoices", "path": "core/choices.py", "snippet": "class CowPregnancyChoices(models.TextChoices):\n \"\"\"\n Choices for the pregnancy status of a cow.\n\n Choices:\n - `OPEN`: Cow is not pregnant.\n - `PREGNANT`: Cow is pregnant.\n - `CALVED`: Cow has calved.\n - `UNAVAILABLE`: Cow cannot have pregnancy status.\n\n Usage:\n These choices represent the pregnancy status of a cow in the Cow model.\n Use these choices when defining or querying Cow instances to represent the current pregnancy status of a cow.\n\n Example:\n ```\n class Cow(models.Model):\n current_pregnancy_status = models.CharField(max_length=15, choices=CowPregnancyChoices.choices)\n ```\n \"\"\"\n\n OPEN = \"Open\"\n PREGNANT = \"Pregnant\"\n CALVED = \"Calved\"\n UNAVAILABLE = \"Unavailable\"" }, { "identifier": "CowProductionStatusChoices", "path": "core/choices.py", "snippet": "class CowProductionStatusChoices(models.TextChoices):\n \"\"\"\n Choices for the production status of a cow.\n\n Choices:\n - `OPEN`: Cow is open (not pregnant 
or lactating).\n - `PREGNANT_NOT_LACTATING`: Cow is pregnant but not lactating.\n - `PREGNANT_AND_LACTATING`: Cow is pregnant and lactating.\n - `DRY`: Cow is dry (not lactating).\n - `CULLED`: Cow has been culled.\n - `QUARANTINED`: Cow is quarantined.\n - `BULL`: Represents a bull.\n - `YOUNG_BULL`: Represents a young bull.\n - `YOUNG_HEIFER`: Represents a young heifer.\n - `MATURE_BULL`: Represents a mature bull.\n - `CALF`: Represents a calf.\n - `WEANER`: Represents a weaner.\n\n Usage:\n These choices represent the production status of a cow in the Cow model.\n Use these choices when defining or querying Cow instances to represent the current production status of a cow.\n\n Example:\n ```\n class Cow(models.Model):\n current_production_status = models.CharField(max_length=15, choices=CowProductionStatusChoices.choices)\n ```\n \"\"\"\n\n OPEN = \"Open\"\n PREGNANT_NOT_LACTATING = \"Pregnant not Lactating\"\n PREGNANT_AND_LACTATING = \"Pregnant and Lactating\"\n DRY = \"Dry\"\n CULLED = \"Culled\"\n QUARANTINED = \"Quarantined\"\n BULL = \"Bull\"\n YOUNG_BULL = \"Young Bull\"\n YOUNG_HEIFER = \"Young Heifer\"\n MATURE_BULL = \"Mature Bull\"\n CALF = \"Calf\"\n WEANER = \"Weaner\"" }, { "identifier": "CowSerializer", "path": "core/serializers.py", "snippet": "class CowSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for the Cow model.\n\n Fields:\n - `breed`: A nested serializer field representing the cow breed, using CowBreedSerializer.\n - `tag_number`: A read-only field representing the cow's tag number.\n - `parity`: A read-only field representing the cow's parity.\n - `age`: A read-only field representing the cow's age in days.\n - `age_in_farm`: A read-only field representing the cow's age in days since introduction to the farm.\n - And more...\n\n Meta:\n - `model`: The Cow model for which the serializer is defined.\n - `fields`: The fields to include in the serialized representation.\n\n Usage:\n Use this serializer to convert Cow model instances to JSON representations\n and vice versa. 
It includes nested serialization for the 'breed' field and\n read-only fields for additional information such as tag number and age.\n\n Methods:\n - `create(validated_data)`: Overrides the default create method to handle nested serialization for the 'breed' field.\n - `update(instance, validated_data)`: Overrides the default update method to exclude certain fields from updating.\n\n Example:\n ```\n class Cow(models.Model):\n breed = models.ForeignKey(CowBreed, on_delete=models.CASCADE)\n tag_number = models.CharField(max_length=20)\n parity = models.IntegerField()\n age = models.IntegerField()\n age_in_farm = models.IntegerField()\n\n class CowSerializer(serializers.ModelSerializer):\n breed = CowBreedSerializer()\n tag_number = serializers.ReadOnlyField()\n parity = serializers.ReadOnlyField()\n age = serializers.ReadOnlyField()\n age_in_farm = serializers.ReadOnlyField()\n\n class Meta:\n model = Cow\n fields = \"__all__\"\n ```\n \"\"\"\n\n breed = CowBreedSerializer()\n tag_number = serializers.ReadOnlyField()\n parity = serializers.ReadOnlyField()\n age = serializers.ReadOnlyField()\n age_in_farm = serializers.ReadOnlyField()\n\n class Meta:\n model = Cow\n fields = \"__all__\"\n\n def create(self, validated_data):\n breed_data = validated_data.pop(\"breed\")\n breed, _ = CowBreed.objects.get_or_create(**breed_data)\n\n cow = Cow.objects.create(breed=breed, **validated_data)\n return cow\n\n def update(self, instance, validated_data):\n fields_to_exclude = [\n \"breed\",\n \"gender\",\n \"sire\",\n \"dam\",\n \"is_bought\",\n \"date_introduced_in_farm\",\n ]\n for field in fields_to_exclude:\n validated_data.pop(field, None)\n return super().update(instance, validated_data)" }, { "identifier": "CullingReasonChoices", "path": "health/choices.py", "snippet": "class CullingReasonChoices(models.TextChoices):\n \"\"\"\n Choices for reasons behind culling a cow.\n\n Choices:\n - `INJURIES`: Culling due to injuries.\n - `CHRONIC_HEALTH`: Culling due to chronic health issues.\n - `COST_OF_CARE`: Culling due to the high cost of care.\n - `UNPROFITABLE`: Culling because the cow is unprofitable.\n - `LOW_MARKET_DEMAND`: Culling due to low market demand.\n - `AGE`: Culling based on the age of the cow.\n - `CONSISTENT_LOW_PRODUCTION`: Culling due to consistently low milk production.\n - `LOW_QUALITY`: Culling due to low-quality milk production.\n - `INEFFICIENT_FEED_CONVERSION`: Culling due to inefficient feed conversion.\n - `INHERITED_DISEASES`: Culling due to inherited diseases.\n - `INBREEDING`: Culling due to inbreeding concerns.\n - `UNWANTED_TRAITS`: Culling due to unwanted traits.\n - `CLIMATE_CHANGE`: Culling due to the impact of climate change.\n - `NATURAL_DISASTER`: Culling due to natural disasters.\n - `OVERPOPULATION`: Culling due to overpopulation concerns.\n - `GOVERNMENT_REGULATIONS`: Culling to comply with government regulations.\n - `ANIMAL_WELFARE_STANDARDS`: Culling to meet animal welfare standards.\n - `ENVIRONMENT_PROTECTION_LAWS`: Culling to comply with environmental protection laws.\n\n Usage:\n These choices represent various reasons for culling a cow and are used as options in the CullingRecord model.\n\n Example:\n ```\n class CullingRecord(models.Model):\n cow = models.OneToOneField(Cow, on_delete=models.CASCADE, related_name=\"culling_record\")\n reason = models.CharField(max_length=35, choices=CullingReasonChoices.choices)\n notes = models.TextField(null=True, max_length=100)\n date_carried = models.DateField(auto_now_add=True)\n ```\n \"\"\"\n\n # 
MEDICAL_REASONS\n INJURIES = \"Injuries\"\n CHRONIC_HEALTH = \"Chronic Health Issues\"\n\n # FINANCIAL_REASONS\n COST_OF_CARE = \"Cost Of Care\"\n UNPROFITABLE = \"Unprofitable\"\n LOW_MARKET_DEMAND = \"Low Market Demand\"\n\n # PRODUCTION_REASONS\n AGE = \"Age\"\n CONSISTENT_LOW_PRODUCTION = \"Consistent Low Production\"\n CONSISTENT_POOR_QUALITY = \"Low Quality\"\n INEFFICIENT_FEED_CONVERSION = \"Inefficient Feed Conversion\"\n\n # GENETIC_REASONS\n INHERITED_DISEASES = \"Inherited Diseases\"\n INBREEDING = \"Inbreeding\"\n UNWANTED_TRAITS = \"Unwanted Traits\"\n\n # ENVIRONMENTAL_REASONS\n CLIMATE_CHANGE = \"Climate Change\"\n NATURAL_DISASTER = \"Natural Disaster\"\n OVERPOPULATION = \"Overpopulation\"\n\n # LEGAL_REASONS\n GOVERNMENT_REGULATIONS = \"Government Regulations\"\n ANIMAL_WELFARE_STANDARDS = \"Animal Welfare Standards\"\n ENVIRONMENT_PROTECTION_LAWS = \"Environmental Protection Laws\"" }, { "identifier": "SymptomLocationChoices", "path": "health/choices.py", "snippet": "class SymptomLocationChoices(models.TextChoices):\n \"\"\"\n Choices for the location of symptoms reported in cows.\n\n Choices:\n - `HEAD`: Symptom located in the head.\n - `NECK`: Symptom located in the neck.\n - `CHEST`: Symptom located in the chest.\n - `ABDOMEN`: Symptom located in the abdomen.\n - `BACK`: Symptom located in the back.\n - `LEGS`: Symptom located in the legs.\n - `TAIL`: Symptom located in the tail.\n - `WHOLE_BODY`: Symptom affecting the whole body.\n - `OTHER`: Other locations not specified.\n\n Usage:\n These choices represent different locations of symptoms reported in cows and are used as options\n in the Symptoms model.\n\n Example:\n ```\n class Symptoms(models.Model):\n location = models.CharField(max_length=20, choices=SymptomLocationChoices.choices)\n ```\n \"\"\"\n\n HEAD = \"Head\"\n NECK = \"Neck\"\n CHEST = \"Chest\"\n ABDOMEN = \"Abdomen\"\n BACK = \"Back\"\n LEGS = \"Legs\"\n TAIL = \"Tail\"\n WHOLE_BODY = \"Whole body\"\n OTHER = \"Other\"" }, { "identifier": "SymptomSeverityChoices", "path": "health/choices.py", "snippet": "class SymptomSeverityChoices(models.TextChoices):\n \"\"\"\n Choices for the severity of symptoms reported in cows.\n\n Choices:\n - `MILD`: Mild severity of the symptom.\n - `MODERATE`: Moderate severity of the symptom.\n - `SEVERE`: Severe severity of the symptom.\n\n Usage:\n These choices represent different levels of severity for symptoms reported in cows and are used as options\n in the Symptoms model.\n\n Example:\n ```\n class Symptoms(models.Model):\n severity = models.CharField(max_length=20, choices=SymptomSeverityChoices.choices)\n ```\n \"\"\"\n\n MILD = \"Mild\"\n MODERATE = \"Moderate\"\n SEVERE = \"Severe\"" }, { "identifier": "SymptomTypeChoices", "path": "health/choices.py", "snippet": "class SymptomTypeChoices(models.TextChoices):\n \"\"\"\n Choices for types of symptoms reported in cows.\n\n Choices:\n - `RESPIRATORY`: Symptom related to the respiratory system.\n - `DIGESTIVE`: Symptom related to the digestive system.\n - `REPRODUCTIVE`: Symptom related to the reproductive system.\n - `PHYSICAL`: Physical symptom not specific to a particular system.\n - `MUSCULOSKELETAL`: Symptom related to the musculoskeletal system.\n - `METABOLIC`: Symptom related to metabolic functions.\n - `OTHER`: Other types of symptoms.\n\n Usage:\n These choices represent different types of symptoms reported in cows and are used as options\n in the Symptoms model.\n\n Example:\n ```\n class Symptoms(models.Model):\n symptom_type = 
models.CharField(max_length=20, choices=SymptomTypeChoices.choices)\n ```\n \"\"\"\n\n RESPIRATORY = \"Respiratory\"\n DIGESTIVE = \"Digestive\"\n REPRODUCTIVE = \"Reproductive\"\n PHYSICAL = \"Physical\"\n MUSCULOSKELETAL = \"Musculoskeletal\"\n METABOLIC = \"Metabolic\"\n OTHER = \"Other\"" }, { "identifier": "PathogenChoices", "path": "health/choices.py", "snippet": "class PathogenChoices(models.TextChoices):\n \"\"\"\n Choices for types of pathogens affecting a cow.\n\n Choices:\n - `BACTERIA`: Bacterial infection.\n - `VIRUS`: Viral infection.\n - `FUNGI`: Fungal infection.\n - `UNKNOWN`: Unknown pathogen.\n\n Usage:\n These choices represent different types of pathogens affecting a cow and are used as options in PathogenRecord.\n\n Example:\n ```\n class PathogenRecord(models.Model):\n name= models.CharField(max_length=10, choices=PathogenChoices.choices)\n # diagnosis_date = models.DateField(auto_now_add=True)\n ```\n \"\"\"\n\n BACTERIA = \"Bacteria\"\n VIRUS = \"Virus\"\n FUNGI = \"Fungi\"\n UNKNOWN = \"Unknown\"" }, { "identifier": "DiseaseCategoryChoices", "path": "health/choices.py", "snippet": "class DiseaseCategoryChoices(models.TextChoices):\n \"\"\"\n Choices for categories of diseases affecting a cow.\n\n Choices:\n - `NUTRITION`: Disease caused by nutritional deficiencies.\n - `INFECTIOUS`: Disease caused by infectious agents.\n - `PHYSIOLOGICAL`: Disease caused by physiological factors.\n - `GENETIC`: Disease caused by genetic factors.\n\n Usage:\n These choices represent different categories of diseases affecting a cow and are used as options\n in the DiseaseCategory model.\n\n Example:\n ```\n class DiseaseCategory(models.Model):\n name = models.CharField(max_length=15, choices=DiseaseCategoryChoices.choices)\n ```\n \"\"\"\n\n NUTRITION = \"Nutrition\"\n INFECTIOUS = \"Infectious\"\n PHYSIOLOGICAL = \"Physiological\"\n GENETIC = \"Genetic\"" }, { "identifier": "TreatmentStatusChoices", "path": "health/choices.py", "snippet": "class TreatmentStatusChoices(models.TextChoices):\n \"\"\"\n Choices for the status of treatments given to cows.\n\n Choices:\n - `SCHEDULED`: Treatment is scheduled but not initiated.\n - `IN_PROGRESS`: Treatment is currently in progress.\n - `COMPLETED`: Treatment has been successfully completed.\n - `CANCELLED`: Treatment was cancelled before completion.\n - `POSTPONED`: Treatment was postponed to a later date.\n\n Usage:\n These choices represent different statuses of treatments given to cows and are used as options\n in the Treatment model.\n\n Example:\n ```\n class Treatment(models.Model):\n treatment_status = models.CharField(\n max_length=15,\n choices=TreatmentStatusChoices.choices,\n default=TreatmentStatusChoices.SCHEDULED,\n )\n ```\n \"\"\"\n\n SCHEDULED = \"Scheduled\"\n IN_PROGRESS = \"In Progress\"\n COMPLETED = \"Completed\"\n CANCELLED = \"Cancelled\"\n POSTPONED = \"Postponed\"" }, { "identifier": "Pathogen", "path": "health/models.py", "snippet": "class Pathogen(models.Model):\n \"\"\"\n Represents a pathogen affecting a cow.\n\n Attributes:\n - `name` (str): The type of pathogen, chosen from predefined choices.\n\n Methods:\n - `clean`: Validates the name of the pathogen.\n \"\"\"\n\n name = models.CharField(max_length=10, choices=PathogenChoices.choices, unique=True)\n # diagnosis_date = models.DateField(auto_now_add=True)\n\n def clean(self):\n \"\"\"\n Validate the name of the pathogen.\n \"\"\"\n PathogenValidator.validate_name(self.name)\n\n def save(self, *args, **kwargs):\n \"\"\"\n Overrides the save method to 
perform additional validation before saving.\n \"\"\"\n self.clean()\n super().save(*args, **kwargs)" }, { "identifier": "DiseaseCategory", "path": "health/models.py", "snippet": "class DiseaseCategory(models.Model):\n \"\"\"\n Represents a category of diseases affecting cows.\n\n Attributes:\n - `name` (str): The name of the disease category, chosen from predefined choices.\n\n Methods:\n - `clean`: Validates the name of the disease category.\n \"\"\"\n\n name = models.CharField(\n max_length=15, choices=DiseaseCategoryChoices.choices, unique=True\n )\n\n def clean(self):\n \"\"\"\n Validate the name of the disease category.\n \"\"\"\n DiseaseCategoryValidator.validate_name(self.name)\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n \"\"\"\n Overrides the save method to perform additional validation before saving.\n \"\"\"\n self.clean()\n super().save(*args, **kwargs)" }, { "identifier": "Symptoms", "path": "health/models.py", "snippet": "class Symptoms(models.Model):\n \"\"\"\n Represents symptoms reported in cows.\n\n Attributes:\n - `name` (str): The name of the symptom.\n - `symptom_type` (str): The type of the symptom, chosen from predefined choices.\n - `description` (str): Description of the symptom (nullable).\n - `date_observed` (date): Date when the symptom was observed.\n - `severity` (str): Severity of the symptom, chosen from predefined choices.\n - `location` (str): Location of the symptom, chosen from predefined choices.\n\n Methods:\n - `clean`: Validates the attributes of the symptom.\n \"\"\"\n\n name = models.CharField(max_length=50)\n symptom_type = models.CharField(max_length=20, choices=SymptomTypeChoices.choices)\n description = models.TextField(null=True)\n severity = models.CharField(max_length=20, choices=SymptomSeverityChoices.choices)\n location = models.CharField(max_length=20, choices=SymptomLocationChoices.choices)\n date_observed = models.DateField()\n\n def clean(self):\n \"\"\"\n Validates the attributes of the symptom.\n \"\"\"\n SymptomValidator.validate_name(self.name)\n SymptomValidator.validate_fields(\n self.date_observed, self.symptom_type, self.severity, self.location\n )\n SymptomValidator.validate_type_and_location_compatibility(\n self.symptom_type, self.location\n )\n\n def __str__(self):\n return f\" {self.name} reported as #{self.severity} - on #{self.date_observed}\"\n\n def save(self, *args, **kwargs):\n \"\"\"\n Overrides the save method to perform additional validation before saving.\n \"\"\"\n self.clean()\n super().save(*args, **kwargs)" }, { "identifier": "DiseaseSerializer", "path": "health/serializers.py", "snippet": "class DiseaseSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for the Disease model.\n\n Fields:\n - `name`: The name of the disease.\n - `pathogen`: The pathogen causing the disease.\n - `category`: The category of the disease.\n - `date_reported`: Date when the disease was reported.\n - `occurrence_date`: Date when the disease occurred.\n - `notes`: Additional notes about the disease (nullable).\n - `cows`: Cows affected by the disease.\n - `symptoms`: Symptoms associated with the disease.\n\n Meta:\n - `model`: The Disease model for which the serializer is defined.\n - `fields`: The fields to include in the serialized representation.\n\n Note: The `cows` and `symptoms` fields are represented by their primary keys in the serialized data.\n\n \"\"\"\n\n class Meta:\n model = Disease\n fields = (\n \"name\",\n \"pathogen\",\n \"category\",\n \"date_reported\",\n 
\"occurrence_date\",\n \"notes\",\n \"cows\",\n \"symptoms\",\n )" }, { "identifier": "SexChoices", "path": "users/choices.py", "snippet": "class SexChoices(models.TextChoices):\n MALE = \"Male\"\n FEMALE = \"Female\"" }, { "identifier": "todays_date", "path": "core/utils.py", "snippet": "" } ]
from datetime import timedelta from django.urls import reverse from rest_framework import status from rest_framework.test import APIClient from core.choices import ( CowAvailabilityChoices, CowBreedChoices, CowCategoryChoices, CowPregnancyChoices, CowProductionStatusChoices, ) from core.serializers import CowSerializer from health.choices import ( CullingReasonChoices, SymptomLocationChoices, SymptomSeverityChoices, SymptomTypeChoices, PathogenChoices, DiseaseCategoryChoices, TreatmentStatusChoices, ) from health.models import Pathogen, DiseaseCategory, Symptoms from health.serializers import DiseaseSerializer from users.choices import SexChoices from core.utils import todays_date import pytest
7951
quarantine_data = { "cow": cow.id, "reason": "Calving", "start_date": todays_date - timedelta(days=30), "end_date": todays_date, "notes": "Some notes", } return quarantine_data @pytest.fixture def setup_symptom_data(): symptom_data = { "name": "Fever", "symptom_type": SymptomTypeChoices.RESPIRATORY, "date_observed": todays_date, "severity": SymptomSeverityChoices.MILD, "location": SymptomLocationChoices.WHOLE_BODY, } return symptom_data @pytest.fixture def setup_disease_data(): pathogen = Pathogen.objects.create(name=PathogenChoices.UNKNOWN) disease_category = DiseaseCategory.objects.create( name=DiseaseCategoryChoices.NUTRITION ) general_cow = { "name": "General Cow", "breed": {"name": CowBreedChoices.AYRSHIRE}, "date_of_birth": todays_date - timedelta(days=650), "gender": SexChoices.FEMALE, "availability_status": CowAvailabilityChoices.ALIVE, "current_pregnancy_status": CowPregnancyChoices.PREGNANT, "category": CowCategoryChoices.HEIFER, "current_production_status": CowProductionStatusChoices.PREGNANT_NOT_LACTATING, } serializer1 = CowSerializer(data=general_cow) serializer2 = CowSerializer(data=general_cow) assert serializer1.is_valid() assert serializer2.is_valid() cow1 = serializer1.save() cow2 = serializer2.save() symptom_data = { "name": "Fever", "symptom_type": SymptomTypeChoices.RESPIRATORY, "date_observed": todays_date, "severity": SymptomSeverityChoices.MILD, "location": SymptomLocationChoices.WHOLE_BODY, } symptom = Symptoms.objects.create(**symptom_data) disease_data = { "name": "Brucellosis", "pathogen": pathogen.id, "category": disease_category.id, "occurrence_date": todays_date, "cows": [cow1.id, cow2.id], "symptoms": [symptom.id], } return disease_data @pytest.fixture def setup_treatment_data(): pathogen = Pathogen.objects.create(name=PathogenChoices.UNKNOWN) disease_category = DiseaseCategory.objects.create( name=DiseaseCategoryChoices.NUTRITION ) general_cow = { "name": "General Cow", "breed": {"name": CowBreedChoices.AYRSHIRE}, "date_of_birth": todays_date - timedelta(days=650), "gender": SexChoices.FEMALE, "availability_status": CowAvailabilityChoices.ALIVE, "current_pregnancy_status": CowPregnancyChoices.PREGNANT, "category": CowCategoryChoices.HEIFER, "current_production_status": CowProductionStatusChoices.PREGNANT_NOT_LACTATING, } serializer1 = CowSerializer(data=general_cow) serializer2 = CowSerializer(data=general_cow) assert serializer1.is_valid() assert serializer2.is_valid() cow1 = serializer1.save() cow2 = serializer2.save() symptom_data = { "name": "Fever", "symptom_type": SymptomTypeChoices.RESPIRATORY, "date_observed": todays_date, "severity": SymptomSeverityChoices.MILD, "location": SymptomLocationChoices.WHOLE_BODY, } symptom = Symptoms.objects.create(**symptom_data) disease_data = { "name": "Brucellosis", "pathogen": pathogen.id, "category": disease_category.id, "occurrence_date": todays_date, "cows": [cow1.id, cow2.id], "symptoms": [symptom.id], } serializer3 = DiseaseSerializer(data=disease_data) serializer3.is_valid() disease = serializer3.save() treatment_data = { "disease": disease.id, "cow": cow1.id, "treatment_method": "Intravenous injection on the thighs", "notes": "Fully treated cow",
@pytest.fixture() @pytest.mark.django_db def setup_users(): client = APIClient() # Create farm owner user farm_owner_data = { "username": "[email protected]", "email": "[email protected]", "password": "testpassword", "first_name": "Farm", "last_name": "Owner", "phone_number": "+254787654321", "sex": SexChoices.MALE, "is_farm_owner": True, } farm_owner_login_data = { "username": "[email protected]", "password": "testpassword", } response = client.post("/auth/users/", farm_owner_data) # Retrieve the token after login response = client.post(reverse("users:login"), farm_owner_login_data) farm_owner_token = response.data["auth_token"] # Create farm manager user farm_manager_data = { "username": "[email protected]", "email": "[email protected]", "password": "testpassword", "first_name": "Farm", "last_name": "Manager", "phone_number": "+254755555555", "sex": SexChoices.MALE, "is_farm_manager": True, } farm_manager_login_data = { "username": "[email protected]", "password": "testpassword", } response = client.post("/auth/users/", farm_manager_data) # Retrieve the token after login response = client.post(reverse("users:login"), farm_manager_login_data) farm_manager_token = response.data["auth_token"] # Create assistant farm manager user asst_farm_manager_data = { "username": "[email protected]", "email": "[email protected]", "password": "testpassword", "first_name": "Assistant", "last_name": "Farm Manager", "phone_number": "+254744444444", "sex": SexChoices.FEMALE, "is_assistant_farm_manager": True, } asst_farm_manager_login_data = { "username": "[email protected]", "password": "testpassword", } response = client.post("/auth/users/", asst_farm_manager_data) # Retrieve the token after login response = client.post(reverse("users:login"), asst_farm_manager_login_data) asst_farm_manager_token = response.data["auth_token"] # Create team leader user team_leader_data = { "username": "[email protected]", "email": "[email protected]", "password": "testpassword", "first_name": "Team", "last_name": "Leader", "phone_number": "+254733333333", "sex": SexChoices.MALE, "is_team_leader": True, } team_leader_login_data = { "username": "[email protected]", "password": "testpassword", } response = client.post("/auth/users/", team_leader_data) # Retrieve the token after login response = client.post(reverse("users:login"), team_leader_login_data) assert response.status_code == status.HTTP_200_OK team_leader_token = response.data["auth_token"] # Create farm worker user farm_worker_data = { "username": "[email protected]", "email": "[email protected]", "password": "testpassword", "first_name": "Farm", "last_name": "Worker", "phone_number": "+254722222222", "sex": SexChoices.FEMALE, "is_farm_worker": True, } farm_worker_login_data = { "username": "[email protected]", "password": "testpassword", } response = client.post("/auth/users/", farm_worker_data) # Retrieve the token after login response = client.post(reverse("users:login"), farm_worker_login_data) farm_worker_token = response.data["auth_token"] return { "client": client, "farm_owner_token": farm_owner_token, "farm_manager_token": farm_manager_token, "asst_farm_manager_token": asst_farm_manager_token, "team_leader_token": team_leader_token, "farm_worker_token": farm_worker_token, } @pytest.fixture @pytest.mark.django_db def setup_weight_record_data(): general_cow = { "name": "General Cow", "breed": {"name": CowBreedChoices.AYRSHIRE}, "date_of_birth": todays_date - timedelta(days=650), "gender": SexChoices.FEMALE, "availability_status": CowAvailabilityChoices.ALIVE, 
"current_pregnancy_status": CowPregnancyChoices.OPEN, "category": CowCategoryChoices.HEIFER, "current_production_status": CowProductionStatusChoices.OPEN, } serializer = CowSerializer(data=general_cow) assert serializer.is_valid() cow = serializer.save() weight_data = {"cow": cow.id, "weight_in_kgs": 1150} return weight_data @pytest.fixture @pytest.mark.django_db def setup_culling_record_data(): general_cow = { "name": "General Cow", "breed": {"name": CowBreedChoices.AYRSHIRE}, "date_of_birth": todays_date - timedelta(days=370), "gender": SexChoices.FEMALE, "availability_status": CowAvailabilityChoices.ALIVE, "current_pregnancy_status": CowPregnancyChoices.PREGNANT, "category": CowCategoryChoices.HEIFER, "current_production_status": CowProductionStatusChoices.PREGNANT_NOT_LACTATING, } serializer = CowSerializer(data=general_cow) assert serializer.is_valid() cow = serializer.save() culling_data = { "cow": cow.id, "reason": CullingReasonChoices.COST_OF_CARE, } return culling_data @pytest.fixture @pytest.mark.django_db def setup_quarantine_record_data(): general_cow = { "name": "General Cow", "breed": {"name": CowBreedChoices.AYRSHIRE}, "date_of_birth": todays_date - timedelta(days=650), "gender": SexChoices.FEMALE, "availability_status": CowAvailabilityChoices.ALIVE, "current_pregnancy_status": CowPregnancyChoices.PREGNANT, "category": CowCategoryChoices.HEIFER, "current_production_status": CowProductionStatusChoices.PREGNANT_NOT_LACTATING, } serializer = CowSerializer(data=general_cow) if not serializer.is_valid(): print(serializer.errors) assert serializer.is_valid() cow = serializer.save() quarantine_data = { "cow": cow.id, "reason": "Calving", "start_date": todays_date - timedelta(days=30), "end_date": todays_date, "notes": "Some notes", } return quarantine_data @pytest.fixture def setup_symptom_data(): symptom_data = { "name": "Fever", "symptom_type": SymptomTypeChoices.RESPIRATORY, "date_observed": todays_date, "severity": SymptomSeverityChoices.MILD, "location": SymptomLocationChoices.WHOLE_BODY, } return symptom_data @pytest.fixture def setup_disease_data(): pathogen = Pathogen.objects.create(name=PathogenChoices.UNKNOWN) disease_category = DiseaseCategory.objects.create( name=DiseaseCategoryChoices.NUTRITION ) general_cow = { "name": "General Cow", "breed": {"name": CowBreedChoices.AYRSHIRE}, "date_of_birth": todays_date - timedelta(days=650), "gender": SexChoices.FEMALE, "availability_status": CowAvailabilityChoices.ALIVE, "current_pregnancy_status": CowPregnancyChoices.PREGNANT, "category": CowCategoryChoices.HEIFER, "current_production_status": CowProductionStatusChoices.PREGNANT_NOT_LACTATING, } serializer1 = CowSerializer(data=general_cow) serializer2 = CowSerializer(data=general_cow) assert serializer1.is_valid() assert serializer2.is_valid() cow1 = serializer1.save() cow2 = serializer2.save() symptom_data = { "name": "Fever", "symptom_type": SymptomTypeChoices.RESPIRATORY, "date_observed": todays_date, "severity": SymptomSeverityChoices.MILD, "location": SymptomLocationChoices.WHOLE_BODY, } symptom = Symptoms.objects.create(**symptom_data) disease_data = { "name": "Brucellosis", "pathogen": pathogen.id, "category": disease_category.id, "occurrence_date": todays_date, "cows": [cow1.id, cow2.id], "symptoms": [symptom.id], } return disease_data @pytest.fixture def setup_treatment_data(): pathogen = Pathogen.objects.create(name=PathogenChoices.UNKNOWN) disease_category = DiseaseCategory.objects.create( name=DiseaseCategoryChoices.NUTRITION ) general_cow = { "name": "General Cow", 
"breed": {"name": CowBreedChoices.AYRSHIRE}, "date_of_birth": todays_date - timedelta(days=650), "gender": SexChoices.FEMALE, "availability_status": CowAvailabilityChoices.ALIVE, "current_pregnancy_status": CowPregnancyChoices.PREGNANT, "category": CowCategoryChoices.HEIFER, "current_production_status": CowProductionStatusChoices.PREGNANT_NOT_LACTATING, } serializer1 = CowSerializer(data=general_cow) serializer2 = CowSerializer(data=general_cow) assert serializer1.is_valid() assert serializer2.is_valid() cow1 = serializer1.save() cow2 = serializer2.save() symptom_data = { "name": "Fever", "symptom_type": SymptomTypeChoices.RESPIRATORY, "date_observed": todays_date, "severity": SymptomSeverityChoices.MILD, "location": SymptomLocationChoices.WHOLE_BODY, } symptom = Symptoms.objects.create(**symptom_data) disease_data = { "name": "Brucellosis", "pathogen": pathogen.id, "category": disease_category.id, "occurrence_date": todays_date, "cows": [cow1.id, cow2.id], "symptoms": [symptom.id], } serializer3 = DiseaseSerializer(data=disease_data) serializer3.is_valid() disease = serializer3.save() treatment_data = { "disease": disease.id, "cow": cow1.id, "treatment_method": "Intravenous injection on the thighs", "notes": "Fully treated cow",
"treatment_status": TreatmentStatusChoices.COMPLETED,
12
2023-12-09 06:56:42+00:00
12k
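The `setup_users` fixture in this record's test code follows a standard DRF testing pattern: register a user through the auth endpoint, log in to obtain a token, and hand the token back to the tests. The sketch below condenses that flow to a single user; the endpoint paths, the `users:login` route name, and the `auth_token` key are taken from the record, while the fixture name, email, and the use of the `db` fixture are illustrative assumptions.

```python
import pytest
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient


@pytest.fixture
def farm_owner_client(db):  # pytest-django's `db` fixture enables database access
    """Register one user, log in, and return a client authenticated with its token."""
    client = APIClient()
    user_data = {
        "username": "[email protected]",
        "email": "[email protected]",
        "password": "testpassword",
    }
    login_data = {"username": user_data["username"], "password": user_data["password"]}

    # Registration and login endpoints as used in the record's fixtures.
    client.post("/auth/users/", user_data)
    response = client.post(reverse("users:login"), login_data)
    assert response.status_code == status.HTTP_200_OK

    token = response.data["auth_token"]
    # Subsequent requests from this client carry the token automatically.
    client.credentials(HTTP_AUTHORIZATION=f"Token {token}")
    return client
```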
facebookresearch/chat2map-official
chat2map/mapping/passive_mapping/policy.py
[ { "identifier": "VisualEnc", "path": "chat2map/mapping/mapping_models/visual_cnn.py", "snippet": "class VisualEnc(nn.Module):\n \"\"\"Visual encoder\"\"\"\n\n def __init__(self, cfg=None):\n \"\"\"Takes in RGB images and 90 degree FoV local egocentric map inputs and encodes them\"\"\"\n super().__init__()\n\n passive_mapping_cfg = cfg.PassiveMapping\n sim_cfg = cfg.TASK_CONFIG.SIMULATOR\n\n assert \"RGB_SENSOR\" in cfg.SENSORS\n\n self._n_inputMap_channels = sim_cfg.EGO_LOCAL_OCC_MAP.NUM_CHANNELS\n\n self._num_out_channels = passive_mapping_cfg.VisualEnc.num_out_channels\n assert passive_mapping_cfg.MemoryNet.Transformer.input_size == 2 * self._num_out_channels\n\n cnn_layers = [\n conv_block(self._n_inputMap_channels, 64, norm_layer=nn.BatchNorm2d),\n conv_block(64, 64, norm_layer= nn.BatchNorm2d),\n conv_block(64, 128, padding=(2, 2), norm_layer=nn.BatchNorm2d),\n conv_block(128, 256, (3, 3), padding=(1, 1), stride=(1, 1), norm_layer=nn.BatchNorm2d),\n conv_block(256, self._num_out_channels, (3, 3), padding=(1, 1), stride=(1, 1), norm_layer=nn.BatchNorm2d)\n ]\n self.cnn = nn.Sequential(*cnn_layers)\n\n for module in self.cnn:\n for layer in module:\n if isinstance(layer, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):\n nn.init.kaiming_normal_(\n layer.weight, nn.init.calculate_gain(\"leaky_relu\", 0.2)\n )\n if layer.bias is not None:\n nn.init.constant_(layer.bias, val=0)\n elif isinstance(layer, (nn.BatchNorm1d, nn.BatchNorm2d)):\n if layer.affine:\n layer.weight.data.fill_(1)\n layer.bias.data.zero_()\n\n rgb_cnn_layers = [\n conv_block(3, 64, norm_layer=nn.BatchNorm2d),\n conv_block(64, 64, norm_layer=nn.BatchNorm2d),\n conv_block(64, 128, norm_layer=nn.BatchNorm2d),\n conv_block(128, 256, norm_layer=nn.BatchNorm2d),\n conv_block(256, self._num_out_channels, norm_layer=nn.BatchNorm2d),\n ]\n self.rgb_cnn = nn.Sequential(*rgb_cnn_layers)\n\n for module in self.rgb_cnn:\n for layer in module:\n if isinstance(layer, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):\n nn.init.kaiming_normal_(\n layer.weight, nn.init.calculate_gain(\"leaky_relu\", 0.2)\n )\n if layer.bias is not None:\n nn.init.constant_(layer.bias, val=0)\n elif isinstance(layer, (nn.BatchNorm1d, nn.BatchNorm2d)):\n if layer.affine:\n layer.weight.data.fill_(1)\n layer.bias.data.zero_()\n\n @property\n def is_blind(self):\n return False\n\n @property\n def n_out_feats(self):\n return 16 * 512\n\n def _preprocess_rgb(self, rgb_observations):\n return rgb_observations\n\n def forward(self, observations,):\n \"\"\"Given RGB imags and 90 degree FoV egocentric local occupancy maps, produces visual features\"\"\"\n assert \"occ_map\" in observations\n occMap_observations = observations[\"occ_map\"]\n occMap_observations = occMap_observations.permute(0, 3, 1, 2)\n\n occMap_out = self.cnn(occMap_observations)\n\n assert \"rgb\" in observations\n rgb_observations = observations[\"rgb\"]\n # permute tensor to dimension [BATCH x CHANNEL x HEIGHT X WIDTH]\n rgb_observations = rgb_observations.permute(0, 3, 1, 2)\n rgb_observations = rgb_observations.float() / 255.0 # normalize RGB\n rgb_observations = self._preprocess_rgb(rgb_observations)\n\n rgb_out = self.rgb_cnn(rgb_observations)\n\n out = torch.cat([occMap_out, rgb_out], dim=1)\n\n return out" }, { "identifier": "OccMapDec", "path": "chat2map/mapping/mapping_models/visual_cnn.py", "snippet": "class OccMapDec(nn.Module):\n \"\"\"Occupancy map decoder\"\"\"\n\n def __init__(self, passive_mapping_cfg, sim_cfg,):\n \"\"\"Takes in feature outputs of the transformer decoder and predicts 
estimates of 360 degree FoV local\n egocentric occupancy map targets\"\"\"\n super().__init__()\n\n self._passive_mapping_cfg = passive_mapping_cfg\n self._glob_can_occ_map_ego_crop_cfg = sim_cfg.GT_GLOBAL_CANONICAL_OCC_MAP_EGO_CROP\n\n assert self._glob_can_occ_map_ego_crop_cfg.SIZE in [64, 80, 96, 128]\n\n assert passive_mapping_cfg.MemoryNet.type == \"transformer\"\n\n assert passive_mapping_cfg.MemoryNet.Transformer.decoder_out_size == 1024\n self._n_inputMapFeat_channels = 1024\n self._inputFeat_h = 4\n self._inputFeat_w = 4\n self._input_feat_size = self._n_inputMapFeat_channels * self._inputFeat_h * self._inputFeat_w\n\n if self._glob_can_occ_map_ego_crop_cfg.SIZE == 64:\n self.dec_cnn = nn.Sequential(\n convT_block(1024, 64 * 8, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 8, 64 * 4, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 4, 64 * 2, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 2, 64 * 1, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 1, self._glob_can_occ_map_ego_crop_cfg.NUM_CHANNELS, (3, 3), stride=(1, 1),\n padding=(1, 1), outermost=True, use_sigmoid=True,),\n )\n elif self._glob_can_occ_map_ego_crop_cfg.SIZE == 80:\n self.dec_cnn = nn.Sequential(\n conv_block(1024, 64 * 8, kernel_size=(2, 2), padding=(1, 1), stride=(1, 1), norm_layer=nn.BatchNorm2d),\n convT_block(64 * 8, 64 * 8, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 8, 64 * 4, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 4, 64 * 2, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 2, 64 * 1, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 1, self._glob_can_occ_map_ego_crop_cfg.NUM_CHANNELS, (3, 3), stride=(1, 1),\n padding=(1, 1), outermost=True, use_sigmoid=True,),\n )\n elif self._glob_can_occ_map_ego_crop_cfg.SIZE == 96:\n self.dec_cnn = nn.Sequential(\n conv_block(1024, 64 * 8, kernel_size=(1, 1), padding=(1, 1), stride=(1, 1), norm_layer=nn.BatchNorm2d),\n convT_block(64 * 8, 64 * 8, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 8, 64 * 4, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 4, 64 * 2, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 2, 64 * 1, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 1, self._glob_can_occ_map_ego_crop_cfg.NUM_CHANNELS, (3, 3), stride=(1, 1),\n padding=(1, 1), outermost=True, use_sigmoid=True,),\n )\n elif self._glob_can_occ_map_ego_crop_cfg.SIZE == 128:\n self.dec_cnn = nn.Sequential(\n convT_block(1024, 64 * 8, norm_layer=nn.BatchNorm2d), \n convT_block(64 * 8, 64 * 4, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 4, 64 * 2, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 2, 64 * 1, norm_layer=nn.BatchNorm2d),\n convT_block(64 * 1, self._glob_can_occ_map_ego_crop_cfg.NUM_CHANNELS,\n outermost=True, use_sigmoid=True,),\n )\n else:\n raise NotImplementedError\n\n self.layer_init()\n\n def layer_init(self):\n for module in self.dec_cnn:\n for layer in module:\n if isinstance(layer, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):\n nn.init.kaiming_normal_(\n layer.weight, nn.init.calculate_gain(\"relu\")\n )\n if layer.bias is not None:\n nn.init.constant_(layer.bias, val=0)\n elif isinstance(layer, (nn.BatchNorm1d, nn.BatchNorm2d)):\n if layer.affine:\n layer.weight.data.fill_(1)\n layer.bias.data.zero_()\n\n def forward(self, observations,):\n \"\"\"Given feature outputs of the transformer memory decoder, computes estimates of the 360 degree FoV local\n egocentric target occupancy maps\"\"\"\n assert \"memory_outFeats\" in observations\n memory_outFeats = observations[\"memory_outFeats\"]\n assert len(memory_outFeats.size()) == 2\n assert memory_outFeats.size(1) == 
self._input_feat_size\n memory_outFeats =\\\n memory_outFeats.reshape((memory_outFeats.size(0),\n self._inputFeat_h,\n self._inputFeat_w,\n -1))\n memory_outFeats = memory_outFeats.permute((0, 3, 1, 2))\n\n out = self.dec_cnn(memory_outFeats)\n\n assert len(out.size()) == 4\n # permute tensor to dimension [BATCH x HEIGHT x WIDTH x CHANNEL]\n out = out.permute(0, 2, 3, 1)\n\n return out" }, { "identifier": "AudioEnc", "path": "chat2map/mapping/mapping_models/audio_cnn.py", "snippet": "class AudioEnc(nn.Module):\n \"\"\"Audio encoder\"\"\"\n\n def __init__(self, cfg,):\n \"\"\"Transforms the spatial audio into spectrograms and computes their features\"\"\"\n super().__init__()\n\n self._passive_mapping_cfg = cfg.PassiveMapping\n self._task_cfg = cfg.TASK_CONFIG\n self._env_cfg = self._task_cfg.ENVIRONMENT\n\n self._sim_cfg = self._task_cfg.SIMULATOR\n self._audio_cfg = self._sim_cfg.AUDIO\n\n audioEnc_cfg = self._passive_mapping_cfg.AudioEnc\n\n self._n_input_channels = audioEnc_cfg.num_input_channels\n\n self.stft_model = torchaudio.transforms.Spectrogram(\n n_fft=self._audio_cfg.N_FFT,\n win_length=self._audio_cfg.WIN_LENGTH,\n hop_length=self._audio_cfg.HOP_LENGTH,\n power=2,\n )\n\n self.model = nn.Sequential(\n conv_block(self._n_input_channels, 64, norm_layer=nn.BatchNorm2d),\n conv_block(64, 64, (8, 8), stride=(4, 4), padding=(2, 2), norm_layer=nn.BatchNorm2d),\n conv_block(64, 128, norm_layer=nn.BatchNorm2d),\n conv_block(128, 256, norm_layer=nn.BatchNorm2d),\n conv_block(256, self._passive_mapping_cfg.MemoryNet.Transformer.input_size, norm_layer=nn.BatchNorm2d),\n )\n\n for module in self.model:\n for layer in module:\n if isinstance(layer, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):\n nn.init.kaiming_normal_(\n layer.weight, nn.init.calculate_gain(\"leaky_relu\", 0.2)\n )\n if layer.bias is not None:\n nn.init.constant_(layer.bias, val=0)\n elif isinstance(layer, (nn.BatchNorm1d, nn.BatchNorm2d)):\n if layer.affine:\n layer.weight.data.fill_(1)\n layer.bias.data.zero_()\n\n @property\n def n_out_feats(self):\n return 1024\n\n def forward(self, observations):\n \"\"\"Given the audio waveforms, transforms them into spectrograms and computes their features\"\"\"\n assert \"audio\" in observations\n audio_wavs = observations[\"audio\"]\n audio_wavs = audio_wavs.permute(0, 2, 1)\n\n B = audio_wavs.size(0)\n n_channels = audio_wavs.size(1)\n\n audio_mag_spects = self.stft_model(audio_wavs.reshape(audio_wavs.size(0) * audio_wavs.size(1), -1)).pow(0.5)\n audio_mag_spects = audio_mag_spects.reshape(B, n_channels, *audio_mag_spects.size()[1:])\n\n out = self.model(audio_mag_spects)\n assert out.size(2) == self._passive_mapping_cfg.PositionalNet.patch_hwCh[0]\n assert out.size(3) == self._passive_mapping_cfg.PositionalNet.patch_hwCh[1]\n\n return out" }, { "identifier": "ModalityTagTypeNet", "path": "chat2map/mapping/mapping_models/modality_tag_type_net.py", "snippet": "class ModalityTagTypeNet(nn.Module):\n \"\"\"Takes the modality type tag for a certain modality and produces its embeddings\"\"\"\n\n def __init__(self, n_modality_tag_types, passive_mapping_cfg,):\n \"\"\"\n Creates an instance of the class that takes the modality type tag for a certain modality and produces its\n embeddings\n :param n_modality_tag_types: number of modality tag types\n :param passive_mapping_cfg: passive mapping config\n \"\"\"\n\n super().__init__()\n self._positional_net_cfg = passive_mapping_cfg.PositionalNet\n\n self._out_h = self._positional_net_cfg.patch_hwCh[0]\n self._out_w = 
self._positional_net_cfg.patch_hwCh[1]\n self._n_out_ch = self._positional_net_cfg.patch_hwCh[2]\n\n assert self._n_out_ch == passive_mapping_cfg.modality_tag_type_encoding_size, print(self._n_out_ch,\n passive_mapping_cfg.modality_tag_type_encoding_size)\n self.modality_tag_type_lookup_dict = nn.Embedding(n_modality_tag_types,\n passive_mapping_cfg.modality_tag_type_encoding_size,)\n\n def forward(self, x):\n \"\"\"Given the modality type tag, computes the modality embeddings\"\"\"\n out = self.modality_tag_type_lookup_dict(x)\n out = out.unsqueeze(-1).unsqueeze(-1)\n out = out.repeat((1, 1, self._out_h, self._out_w))\n return out" }, { "identifier": "PositionalNet", "path": "chat2map/mapping/mapping_models/positional_net.py", "snippet": "class PositionalNet(nn.Module):\n \"\"\"\n Takes in positional attributes and produces and produces their embeddings\n \"\"\"\n\n def __init__(self, passive_mapping_cfg,):\n \"\"\"\n Creates an instance of the class to take in positional attributes and produces and produces their embeddings\n :param passive_mapping_cfg: passive mapping config\n \"\"\"\n super().__init__()\n self._passive_mapping_cfg = passive_mapping_cfg\n self._positional_net_cfg = passive_mapping_cfg.PositionalNet\n\n self._n_positional_obs = 5\n\n # source: 1. https://github.com/jalammar/jalammar.github.io/blob/master/notebookes/transformer/transformer_positional_encoding_graph.ipynb\n # 2. https://towardsdatascience.com/master-positional-encoding-part-i-63c05d90a0c3\n self._freqs = MIN_FREQ ** (2 * (torch.arange(self._positional_net_cfg.num_freqs_for_sinusoidal,\n dtype=torch.float32) // 2) /\n self._positional_net_cfg.num_freqs_for_sinusoidal)\n\n assert passive_mapping_cfg.MemoryNet.Transformer.input_size == self._positional_net_cfg.patch_hwCh[2]\n self._n_out_feats = self._positional_net_cfg.patch_hwCh[2]\n\n self._positional_linear = nn.Sequential(\n nn.Linear(self._positional_net_cfg.num_freqs_for_sinusoidal * self._n_positional_obs,\n self._n_out_feats,\n bias=False),\n )\n\n @property\n def n_out_feats(self):\n return self._n_out_feats\n\n def forward(self, observations):\n \"\"\"given the positional observations, computes the positional embeddings\"\"\"\n\n positional_obs = observations[\"positional_obs\"]\n assert len(positional_obs.size()) == 2\n assert positional_obs.size(-1) == self._n_positional_obs\n\n freqs = self._freqs.unsqueeze(0).repeat((positional_obs.size(0), 1)).to(positional_obs.device)\n\n positional_net_out = []\n for positional_obs_idx in range(self._n_positional_obs):\n positional_obs_thisIdx = positional_obs[:, positional_obs_idx].unsqueeze(-1)\n positional_obs_thisIdx = positional_obs_thisIdx * freqs\n positional_obs_thisIdxClone = positional_obs_thisIdx.clone()\n positional_obs_thisIdxClone[..., ::2] = torch.cos(positional_obs_thisIdx[..., ::2])\n positional_obs_thisIdxClone[..., 1::2] = torch.sin(positional_obs_thisIdx[..., 1::2])\n\n positional_net_out.append(positional_obs_thisIdxClone)\n\n positional_net_out = torch.cat(positional_net_out, dim=-1)\n\n assert len(positional_net_out.size()) == 2\n assert positional_net_out.size(0) == positional_obs.size(0)\n assert positional_net_out.size(1) == (self._freqs.size(0) * self._n_positional_obs)\n\n positional_net_out = self._positional_linear(positional_net_out)\n positional_net_out = positional_net_out.unsqueeze(-1).unsqueeze(-1)\n positional_net_out = positional_net_out.repeat(\n (1,\n 1,\n self._positional_net_cfg.patch_hwCh[0],\n self._positional_net_cfg.patch_hwCh[1])\n )\n\n return 
positional_net_out" }, { "identifier": "PatchPositionalNet", "path": "chat2map/mapping/mapping_models/positional_net.py", "snippet": "class PatchPositionalNet(nn.Module):\n \"\"\"Takes in the positions of the feats corresponding to contiguous patches in an image or an audio spectrogram\n in the rasterized order and produces their embeddings\"\"\"\n\n def __init__(self, passive_mapping_cfg,):\n \"\"\"\n Creates an instance of the class that takes in the positions of the feats corresponding to contiguous patches\n in an image or an audio spectrogram in the rasterized order and produces their embeddings\n :param passive_mapping_cfg: passive mapping config\n \"\"\"\n\n super().__init__()\n self._passive_mapping_cfg = passive_mapping_cfg\n self._positional_net_cfg = passive_mapping_cfg.PositionalNet\n\n self._n_positional_obs = 1\n self._n_out_feats = self._positional_net_cfg.patch_hwCh[2]\n\n # source: 1. https://github.com/jalammar/jalammar.github.io/blob/master/notebookes/transformer/transformer_positional_encoding_graph.ipynb\n # 2. https://towardsdatascience.com/master-positional-encoding-part-i-63c05d90a0c3\n self._freqs = MIN_FREQ ** (2 * (torch.arange(self._positional_net_cfg.num_freqs_for_sinusoidal,\n dtype=torch.float32) // 2) /\n self._positional_net_cfg.num_freqs_for_sinusoidal)\n\n self._patch_positional_conv = nn.Sequential(\n nn.Conv2d(self._positional_net_cfg.num_freqs_for_sinusoidal *self._n_positional_obs,\n self._n_out_feats,\n kernel_size=1,\n bias=False),\n )\n\n positional_net_out = []\n for i in range(self._positional_net_cfg.patch_hwCh[0]):\n positional_net_out_thisRow = []\n for j in range(self._positional_net_cfg.patch_hwCh[1]):\n raster_idx = i * self._positional_net_cfg.patch_hwCh[1] + j\n\n positional_obs_thisIdx = raster_idx * self._freqs\n positional_obs_thisIdxClone = positional_obs_thisIdx.clone()\n\n positional_obs_thisIdxClone[..., ::2] = torch.cos(positional_obs_thisIdxClone[..., ::2])\n positional_obs_thisIdxClone[..., 1::2] = torch.sin(positional_obs_thisIdxClone[..., 1::2])\n\n positional_net_out_thisRow.append(positional_obs_thisIdxClone)\n\n positional_net_out.append(torch.stack(positional_net_out_thisRow, dim=0))\n\n positional_net_out = torch.stack(positional_net_out, dim=0).permute((2, 0, 1))\n self._positional_net_out = positional_net_out\n\n assert self._n_out_feats == passive_mapping_cfg.MemoryNet.Transformer.input_size\n\n @property\n def n_out_feats(self):\n return self._n_out_feats\n\n def forward(self, observations):\n positional_obs = observations[\"positional_obs\"]\n positional_net_out = self._positional_net_out.unsqueeze(0).repeat((positional_obs.size(0), 1, 1, 1))\\\n .to(positional_obs.device)\n\n positional_net_out = self._patch_positional_conv(positional_net_out)\n\n return positional_net_out" }, { "identifier": "FusionNet", "path": "chat2map/mapping/mapping_models/fusion_net.py", "snippet": "class FusionNet(nn.Module):\n \"\"\"Network to fuse modality features, positional embeddings and modality type tag embeddings\"\"\"\n\n def __init__(self,):\n super().__init__()\n\n def forward(self, observations):\n \"\"\"fuses given different features\"\"\"\n for observation_idx, observation in enumerate(observations):\n if observation_idx == 0:\n out = observation\n else:\n out = out + observation\n\n return out" }, { "identifier": "TransformerMemory", "path": "chat2map/mapping/mapping_models/memory_net.py", "snippet": "class TransformerMemory(nn.Module):\n \"\"\"Transformer memory\"\"\"\n def __init__(self, cfg):\n \"\"\"Creates an instance of 
the transformer memory\"\"\"\n super().__init__()\n\n self._cfg = cfg\n\n self._passive_mapping_cfg = cfg.PassiveMapping\n self._transformer_cfg = self._passive_mapping_cfg.MemoryNet.Transformer\n self._task_cfg = cfg.TASK_CONFIG\n self._env_cfg = self._task_cfg.ENVIRONMENT\n self._sim_cfg = self._task_cfg.SIMULATOR\n\n self.transformer = TransformerWoSelfAttnInDecoder(\n d_model=self._transformer_cfg.input_size,\n nhead=self._transformer_cfg.nhead,\n num_encoder_layers=self._transformer_cfg.num_encoder_layers,\n num_decoder_layers=self._transformer_cfg.num_decoder_layers,\n dim_feedforward=self._transformer_cfg.hidden_size,\n dropout=self._transformer_cfg.dropout,\n activation=self._transformer_cfg.activation,\n d_model_out=self._transformer_cfg.decoder_out_size,\n )\n\n context_length_multiplier = 3\n context_length_multiplier *= self._sim_cfg.ALL_AGENTS.NUM\n context_length_multiplier *= (self._passive_mapping_cfg.PositionalNet.patch_hwCh[0] *\\\n self._passive_mapping_cfg.PositionalNet.patch_hwCh[1])\n\n query_length_multiplier = self._passive_mapping_cfg.PositionalNet.patch_hwCh[0] *\\\n self._passive_mapping_cfg.PositionalNet.patch_hwCh[1]\n\n self._src_mask = self._convert_attn_masks_to_transformer_format(\n torch.ones((self._env_cfg.MAX_CONTEXT_LENGTH * context_length_multiplier,\n self._env_cfg.MAX_CONTEXT_LENGTH * context_length_multiplier,))\n )\n self._mem_mask = self._convert_attn_masks_to_transformer_format(\n torch.ones((self._env_cfg.MAX_QUERY_LENGTH * query_length_multiplier,\n self._env_cfg.MAX_CONTEXT_LENGTH * context_length_multiplier,))\n )\n\n self._tgt_mask = self._convert_attn_masks_to_transformer_format(\n torch.eye(self._env_cfg.MAX_QUERY_LENGTH * query_length_multiplier)\n )\n\n def _convert_key_padding_masks_to_transformer_format(self, key_padding_masks):\n r\"\"\"The key_padding_masks is a FloatTensor with\n - 0 for invalid locations, and\n - 1 for valid locations.\n The required format is a BoolTensor with\n - True for invalid locations, and\n - False for valid locations\n\n source:\n - https://pytorch.org/docs/1.4.0/_modules/torch/nn/modules/transformer.html#TransformerDecoder\n - https://discuss.pytorch.org/t/how-to-add-padding-mask-to-nn-transformerencoder-module/63390/3\n \"\"\"\n return (1 - key_padding_masks) > 0\n\n def _convert_attn_masks_to_transformer_format(self, attn_masks):\n r\"\"\"The attn_masks is a FloatTensor with\n - 0 for invalid locations, and\n - 1 for valid locations.\n The required format is a FloatTensor with\n - float('-inf') for invalid locations, and\n - 0. 
for valid locations\n\n source:\n - https://pytorch.org/docs/1.4.0/_modules/torch/nn/modules/transformer.html#TransformerDecoder\n - https://discuss.pytorch.org/t/how-to-add-padding-mask-to-nn-transformerencoder-module/63390/3\n \"\"\"\n return attn_masks.float().masked_fill(attn_masks == 0, float('-inf')).masked_fill(attn_masks == 1, float(0.0))\n\n def forward(self, observations):\n \"\"\"computes transformer memory features given observations\"\"\"\n assert \"src_feats\" in observations\n src_feats = observations[\"src_feats\"]\n\n assert \"tgt_feats\" in observations\n tgt_feats = observations[\"tgt_feats\"]\n\n \"\"\"how masks works -- source: https://github.com/pytorch/pytorch/blob/7f73f1d591afba823daa4a99a939217fb54d7688/torch/nn/functional.py#L3360\"\"\"\n assert \"src_key_padding_mask\" in observations\n src_key_padding_mask = self._convert_key_padding_masks_to_transformer_format(observations[\"src_key_padding_mask\"])\n\n assert \"tgt_key_padding_mask\" in observations\n tgt_key_padding_mask = self._convert_key_padding_masks_to_transformer_format(observations[\"tgt_key_padding_mask\"])\n\n assert \"memory_key_padding_mask\" in observations\n memory_key_padding_mask = self._convert_key_padding_masks_to_transformer_format(observations[\"memory_key_padding_mask\"])\n\n self._src_mask = self._src_mask.to(src_feats.device)\n self._mem_mask = self._mem_mask.to(memory_key_padding_mask.device)\n self._tgt_mask = self._tgt_mask.to(tgt_feats.device)\n\n out = self.transformer(\n src_feats,\n tgt_feats,\n src_mask=self._src_mask,\n tgt_mask=self._tgt_mask,\n memory_mask=self._mem_mask,\n src_key_padding_mask=src_key_padding_mask,\n tgt_key_padding_mask=tgt_key_padding_mask,\n memory_key_padding_mask=memory_key_padding_mask,\n )\n\n return out" } ]
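The `TransformerMemory` snippet above converts 0/1 masks into the formats `torch.nn.Transformer` expects: key-padding masks become BoolTensors with `True` at invalid positions, and attention masks become FloatTensors with `-inf` at invalid and `0.0` at valid positions. Below is a small self-contained sketch of just that conversion fed into a vanilla `nn.Transformer`; the shapes, layer counts, and the identity target mask are illustrative, not values from the config.

```python
import torch


def key_padding_to_transformer(mask_01: torch.Tensor) -> torch.Tensor:
    """0 = invalid, 1 = valid  ->  BoolTensor with True at invalid positions."""
    return (1 - mask_01) > 0


def attn_to_transformer(mask_01: torch.Tensor) -> torch.Tensor:
    """0 = invalid, 1 = valid  ->  FloatTensor with -inf at invalid, 0.0 at valid."""
    return mask_01.float().masked_fill(mask_01 == 0, float("-inf")).masked_fill(mask_01 == 1, 0.0)


# Illustrative shapes: S = source length, T = target length, B = batch, E = model dim.
S, T, B, E = 6, 4, 2, 16
src = torch.randn(S, B, E)
tgt = torch.randn(T, B, E)

src_key_padding = key_padding_to_transformer(torch.ones(B, S))  # nothing padded
tgt_mask = attn_to_transformer(torch.eye(T))                    # each query attends only to itself

transformer = torch.nn.Transformer(d_model=E, nhead=4,
                                    num_encoder_layers=2, num_decoder_layers=2,
                                    dim_feedforward=32)
out = transformer(src, tgt, tgt_mask=tgt_mask,
                  src_key_padding_mask=src_key_padding)
print(out.shape)  # torch.Size([4, 2, 16])
```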
import os import pickle import math import numpy as np import torch import torch.nn as nn from torchsummary import summary from chat2map.mapping.mapping_models.visual_cnn import VisualEnc, OccMapDec from chat2map.mapping.mapping_models.audio_cnn import AudioEnc from chat2map.mapping.mapping_models.modality_tag_type_net import ModalityTagTypeNet from chat2map.mapping.mapping_models.positional_net import PositionalNet, PatchPositionalNet from chat2map.mapping.mapping_models.fusion_net import FusionNet from chat2map.mapping.mapping_models.memory_net import TransformerMemory
10165
# B x max_query_length x ... -> (B * max_query_length) x ...; B: batch size, # max_query_length: transformer target sequence length T (https://pytorch.org/docs/1.4.0/nn.html#torch.nn.Transformer) query_views_pose = query_views_pose.reshape((-1, *query_views_pose.size()[2:])) query_views_poseFeats = self.pose_net({"positional_obs": query_views_pose}) query_feats.append(query_views_poseFeats) query_views_posePatchFeats = self.patchPose_net({"positional_obs": query_views_pose}) query_feats.append(query_views_posePatchFeats) """fusion net""" query_fusedFeats = self.fusion_net(query_feats) query_fusedFeats = query_fusedFeats.permute((0, 2, 3, 1)) query_fusedFeats = query_fusedFeats.reshape((B, self.max_query_length, query_fusedFeats.size(1), query_fusedFeats.size(2), query_fusedFeats.size(3))) assert query_fusedFeats.size(2) == self._passive_mapping_cfg.PositionalNet.patch_hwCh[0] assert query_fusedFeats.size(3) == self._passive_mapping_cfg.PositionalNet.patch_hwCh[1] query_fusedFeats = query_fusedFeats.reshape((B, self.max_query_length *\ query_fusedFeats.size(2) *\ query_fusedFeats.size(3), -1)) # B x max_query_length x ... -> max_query_length x B x -1; B: batch size, # max_query_length: transformer target sequence length T (https://pytorch.org/docs/1.4.0/nn.html#torch.nn.Transformer) query_fusedFeats = query_fusedFeats.permute(1, 0, 2) """query key padding mask""" assert "query_views_mask" in observations query_key_padding_mask = observations["query_views_mask"] assert len(query_key_padding_mask.size()) == 2 query_key_padding_mask = query_key_padding_mask.unsqueeze(-1).unsqueeze(-1) query_key_padding_mask = query_key_padding_mask.repeat((1, 1, self._passive_mapping_cfg.PositionalNet.patch_hwCh[0], self._passive_mapping_cfg.PositionalNet.patch_hwCh[1])) query_key_padding_mask = query_key_padding_mask.reshape((query_key_padding_mask.size(0), query_key_padding_mask.size(1) *\ query_key_padding_mask.size(2) *\ query_key_padding_mask.size(3))) """memory encoding: context aggregation""" memory_outFeats =\ self.memory_net( { "src_feats": context_fusedFeats, "tgt_feats": query_fusedFeats, "src_key_padding_mask": context_key_padding_mask, "tgt_key_padding_mask": query_key_padding_mask, "memory_key_padding_mask": memory_key_padding_mask, } ) # max_query_length x B x ... -> B x max_query_length x ...; B: batch size, # max_query_length: transformer target sequence length T (https://pytorch.org/docs/1.4.0/nn.html#torch.nn.Transformer) memory_outFeats = memory_outFeats.permute(1, 0, 2) memory_outFeats = memory_outFeats.reshape((B, self.max_query_length, self._passive_mapping_cfg.PositionalNet.patch_hwCh[0], self._passive_mapping_cfg.PositionalNet.patch_hwCh[1], memory_outFeats.size(2))) memory_outFeats = memory_outFeats.reshape((B * self.max_query_length, self._passive_mapping_cfg.PositionalNet.patch_hwCh[0] *\ self._passive_mapping_cfg.PositionalNet.patch_hwCh[1] *\ memory_outFeats.size(4))) """query occMap decoder""" query_occMap_pred = self.query_occMap_dec({"memory_outFeats": memory_outFeats}) # (B * max_query_length) x ... 
-> B x max_query_length x ...; B: batch size, # max_query_length: transformer target sequence length T (https://pytorch.org/docs/1.4.0/nn.html#torch.nn.Transformer) query_occMap_pred = query_occMap_pred.reshape((B, self.max_query_length, *query_occMap_pred.size()[1:])) return query_occMap_pred class PassiveMappingPolicy(Policy): """ Model for passive mapping """ def __init__( self, cfg, ): passive_mapping_cfg = cfg.PassiveMapping task_cfg = cfg.TASK_CONFIG sim_cfg = task_cfg.SIMULATOR # --------------------------------------------- context encoders ----------------------------------------------- """pose net""" pose_net = PositionalNet( passive_mapping_cfg=passive_mapping_cfg, ) patchPose_net = PatchPositionalNet( passive_mapping_cfg=passive_mapping_cfg, ) """modality tag type lookup table""" modality_tag_type_lookup_dict = ModalityTagTypeNet( n_modality_tag_types=3, passive_mapping_cfg=passive_mapping_cfg, ) """views encoder""" context_views_enc = VisualEnc( cfg=cfg, ) """audio encoder""" context_audio_enc = AudioEnc( cfg=cfg, ) """fusion net"""
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class Policy(nn.Module): """ Parent class of model for passive mapping """ def __init__(self, context_views_enc, context_audio_enc, pose_net, patchPose_net, modality_tag_type_lookup_dict, fusion_net, memory_net, query_occMap_dec, cfg ): """Given the audio streams and sampled frames during a conversation, the model predicts estimates of target occupancy maps""" super().__init__() self.context_views_enc = context_views_enc self.context_audio_enc = context_audio_enc self.pose_net = pose_net self.patchPose_net = patchPose_net self.modality_tag_type_lookup_dict = modality_tag_type_lookup_dict self.fusion_net = fusion_net self.memory_net = memory_net self.query_occMap_dec = query_occMap_dec self._cfg = cfg self._task_cfg = cfg.TASK_CONFIG self._env_cfg = self._task_cfg.ENVIRONMENT self._sim_cfg = self._task_cfg.SIMULATOR self._audio_cfg = self._sim_cfg.AUDIO self._passive_mapping_cfg = cfg.PassiveMapping self.max_context_length = self._env_cfg.MAX_CONTEXT_LENGTH self.max_query_length = self._env_cfg.MAX_QUERY_LENGTH def forward(self, observations): """Given the audio streams and sampled frames during a conversation, predicts estimates of target occupancy maps""" # --------------------------------------------- context encoding ------------------------------------------------ context_feats = [] for feat_idx in range(3): context_feats.append([]) context_key_padding_mask = [] """views encoder""" assert "context_maps" in observations context_maps = observations["context_maps"] assert "context_views_pose" in observations context_views_pose = observations["context_views_pose"] assert "context_views_mask" in observations context_views_mask = observations["context_views_mask"] assert len(context_views_mask.size()) == 3 B = context_maps.size(0) num_agents = context_maps.size(1) context_maps = context_maps.reshape((-1, *context_maps.size()[3:])) context_views_dct = {"occ_map": context_maps} if "RGB_SENSOR" in self._cfg.SENSORS: assert "context_rgbs" in observations context_rgbs = observations["context_rgbs"] context_rgbs = context_rgbs.reshape((-1, *context_rgbs.size()[3:])) context_views_dct["rgb"] = context_rgbs context_views_feats = self.context_views_enc(context_views_dct) context_feats[0].append(context_views_feats) # B x num_agents x max_context_length x ... 
-> (B * num_agents * max_context_length) x ...; B: batch size, # max_context_length: transformer source sequence length S (https://pytorch.org/docs/1.4.0/nn.html#torch.nn.Transformer) context_views_pose = context_views_pose.reshape((-1, *context_views_pose.size()[3:])) context_views_poseFeats = self.pose_net({"positional_obs": context_views_pose}) context_feats[0].append(context_views_poseFeats) context_views_posePatchFeats = self.patchPose_net({"positional_obs": context_views_pose}) context_feats[0].append(context_views_posePatchFeats) context_views_modalityType = torch.LongTensor([0]).to(context_views_poseFeats.device) context_views_modalityTypeFeats = self.modality_tag_type_lookup_dict(context_views_modalityType) context_views_modalityTypeFeats =\ context_views_modalityTypeFeats.repeat((context_views_posePatchFeats.size(0), 1, 1, 1)) context_feats[0].append(context_views_modalityTypeFeats) # B x num_agents x max_context_length -> B x (num_agents * max_context_length); B: batch size, context_views_mask = context_views_mask.reshape((context_views_mask.size(0), -1)) context_views_mask = context_views_mask.unsqueeze(-1).unsqueeze(-1) context_views_mask = context_views_mask.repeat((1, 1, self._passive_mapping_cfg.PositionalNet.patch_hwCh[0], self._passive_mapping_cfg.PositionalNet.patch_hwCh[1])) context_views_mask = context_views_mask.reshape((context_views_mask.size(0), context_views_mask.size(1) *\ context_views_mask.size(2) *\ context_views_mask.size(3))) context_key_padding_mask.append(context_views_mask) """self audio encoder""" assert "context_selfAudio" in observations context_selfAudio = observations["context_selfAudio"] assert "context_selfAudio_pose" in observations context_selfAudio_pose = observations["context_selfAudio_pose"] assert "context_selfAudio_mask" in observations context_selfAudio_mask = observations["context_selfAudio_mask"] assert len(context_selfAudio_mask.size()) == 3 assert "context_otherAudio" in observations context_otherAudio = observations["context_otherAudio"] context_selfAudio = context_selfAudio.reshape((-1, *context_selfAudio.size()[3:])) context_otherAudio = context_otherAudio.reshape((-1, *context_otherAudio.size()[3:])) context_audio = torch.cat([context_selfAudio, context_otherAudio], dim=0) context_audio_feats = self.context_audio_enc({"audio": context_audio}) context_selfAudio_feats = context_audio_feats[:context_selfAudio.size(0)] context_feats[1].append(context_selfAudio_feats) # B x num_agents x max_context_length x ... 
-> (B * num_agents * max_context_length) x ...; B: batch size, # max_context_length: transformer source sequence length S (https://pytorch.org/docs/1.4.0/nn.html#torch.nn.Transformer) context_selfAudio_pose = context_selfAudio_pose.reshape((-1, *context_selfAudio_pose.size()[3:])) context_selfAudio_poseFeats = self.pose_net({"positional_obs": context_selfAudio_pose}) context_feats[1].append(context_selfAudio_poseFeats) context_selfAudio_posePatchFeats = self.patchPose_net({"positional_obs": context_selfAudio_pose}) context_feats[1].append(context_selfAudio_posePatchFeats) context_selfAudio_modalityType = torch.LongTensor([1]).to(context_selfAudio_poseFeats.device) context_selfAudio_modalityTypeFeats = self.modality_tag_type_lookup_dict(context_selfAudio_modalityType) context_selfAudio_modalityTypeFeats =\ context_selfAudio_modalityTypeFeats.repeat((context_selfAudio_modalityTypeFeats.size(0), 1, 1, 1)) context_feats[1].append(context_selfAudio_modalityTypeFeats) # B x num_agents x max_context_length -> B x (num_agents * max_context_length); B: batch size, context_selfAudio_mask = context_selfAudio_mask.reshape((context_selfAudio_mask.size(0), -1)) context_selfAudio_mask = context_selfAudio_mask.unsqueeze(-1).unsqueeze(-1) context_selfAudio_mask = context_selfAudio_mask.repeat((1, 1, self._passive_mapping_cfg.PositionalNet.patch_hwCh[0], self._passive_mapping_cfg.PositionalNet.patch_hwCh[1])) context_selfAudio_mask = context_selfAudio_mask.reshape((context_selfAudio_mask.size(0), context_selfAudio_mask.size(1) *\ context_selfAudio_mask.size(2) *\ context_selfAudio_mask.size(3))) context_key_padding_mask.append(context_selfAudio_mask) """audio from other ego encoder""" context_otherAudio_feats = context_audio_feats[context_otherAudio.size(0):] assert "context_otherAudio_pose" in observations context_otherAudio_pose = observations["context_otherAudio_pose"] assert "context_otherAudio_mask" in observations context_otherAudio_mask = observations["context_otherAudio_mask"] assert len(context_otherAudio_mask.size()) == 3 context_feats[2].append(context_otherAudio_feats) # B x num_agents x max_context_length x ... 
-> (B * num_agents * max_context_length) x ...; B: batch size, # max_context_length: transformer source sequence length S (https://pytorch.org/docs/1.4.0/nn.html#torch.nn.Transformer) context_otherAudio_pose = context_otherAudio_pose.reshape((-1, *context_otherAudio_pose.size()[3:])) context_otherAudio_poseFeats = self.pose_net({"positional_obs": context_otherAudio_pose}) context_feats[2].append(context_otherAudio_poseFeats) context_otherAudio_posePatchFeats = self.patchPose_net({"positional_obs": context_otherAudio_pose}) context_feats[2].append(context_otherAudio_posePatchFeats) context_otherAudio_modalityType =\ torch.LongTensor([2]).to(context_otherAudio_poseFeats.device) context_otherAudio_modalityTypeFeats = self.modality_tag_type_lookup_dict(context_otherAudio_modalityType) context_otherAudio_modalityTypeFeats =\ context_otherAudio_modalityTypeFeats.repeat((context_otherAudio_modalityTypeFeats.size(0), 1, 1, 1)) context_feats[2].append(context_otherAudio_modalityTypeFeats) # B x num_agents x max_context_length -> B x (num_agents * max_context_length); B: batch size, context_otherAudio_mask = context_otherAudio_mask.reshape((context_otherAudio_mask.size(0), -1)) context_otherAudio_mask = context_otherAudio_mask.unsqueeze(-1).unsqueeze(-1) context_otherAudio_mask = context_otherAudio_mask.repeat((1, 1, self._passive_mapping_cfg.PositionalNet.patch_hwCh[0], self._passive_mapping_cfg.PositionalNet.patch_hwCh[1])) context_otherAudio_mask = context_otherAudio_mask.reshape((context_otherAudio_mask.size(0), context_otherAudio_mask.size(1) *\ context_otherAudio_mask.size(2) *\ context_otherAudio_mask.size(3))) context_key_padding_mask.append(context_otherAudio_mask) """fusion net""" context_fusedFeats = [] for idx_contextFeats in range(len(context_feats)): temp_context_fusedFeats = self.fusion_net(context_feats[idx_contextFeats]) temp_context_fusedFeats = temp_context_fusedFeats.permute((0, 2, 3, 1)) temp_context_fusedFeats = temp_context_fusedFeats.reshape((B, num_agents * self.max_context_length, temp_context_fusedFeats.size(1), temp_context_fusedFeats.size(2), temp_context_fusedFeats.size(3))) assert temp_context_fusedFeats.size(2) == self._passive_mapping_cfg.PositionalNet.patch_hwCh[0] assert temp_context_fusedFeats.size(3) == self._passive_mapping_cfg.PositionalNet.patch_hwCh[1] temp_context_fusedFeats = temp_context_fusedFeats.reshape((B, num_agents * self.max_context_length *\ temp_context_fusedFeats.size(2) *\ temp_context_fusedFeats.size(3), -1)) temp_context_fusedFeats = temp_context_fusedFeats.permute(1, 0, 2) context_fusedFeats.append(temp_context_fusedFeats) context_fusedFeats = torch.cat(context_fusedFeats, dim=0) """context and memory key padding masks""" context_key_padding_mask = torch.cat(context_key_padding_mask, dim=-1) memory_key_padding_mask = context_key_padding_mask.clone() # --------------------------------------------- query encoding -------------------------------------------------- query_feats = [] """pose encoder""" assert "query_views_pose" in observations query_views_pose = observations["query_views_pose"] # B x max_query_length x ... 
-> (B * max_query_length) x ...; B: batch size, # max_query_length: transformer target sequence length T (https://pytorch.org/docs/1.4.0/nn.html#torch.nn.Transformer) query_views_pose = query_views_pose.reshape((-1, *query_views_pose.size()[2:])) query_views_poseFeats = self.pose_net({"positional_obs": query_views_pose}) query_feats.append(query_views_poseFeats) query_views_posePatchFeats = self.patchPose_net({"positional_obs": query_views_pose}) query_feats.append(query_views_posePatchFeats) """fusion net""" query_fusedFeats = self.fusion_net(query_feats) query_fusedFeats = query_fusedFeats.permute((0, 2, 3, 1)) query_fusedFeats = query_fusedFeats.reshape((B, self.max_query_length, query_fusedFeats.size(1), query_fusedFeats.size(2), query_fusedFeats.size(3))) assert query_fusedFeats.size(2) == self._passive_mapping_cfg.PositionalNet.patch_hwCh[0] assert query_fusedFeats.size(3) == self._passive_mapping_cfg.PositionalNet.patch_hwCh[1] query_fusedFeats = query_fusedFeats.reshape((B, self.max_query_length *\ query_fusedFeats.size(2) *\ query_fusedFeats.size(3), -1)) # B x max_query_length x ... -> max_query_length x B x -1; B: batch size, # max_query_length: transformer target sequence length T (https://pytorch.org/docs/1.4.0/nn.html#torch.nn.Transformer) query_fusedFeats = query_fusedFeats.permute(1, 0, 2) """query key padding mask""" assert "query_views_mask" in observations query_key_padding_mask = observations["query_views_mask"] assert len(query_key_padding_mask.size()) == 2 query_key_padding_mask = query_key_padding_mask.unsqueeze(-1).unsqueeze(-1) query_key_padding_mask = query_key_padding_mask.repeat((1, 1, self._passive_mapping_cfg.PositionalNet.patch_hwCh[0], self._passive_mapping_cfg.PositionalNet.patch_hwCh[1])) query_key_padding_mask = query_key_padding_mask.reshape((query_key_padding_mask.size(0), query_key_padding_mask.size(1) *\ query_key_padding_mask.size(2) *\ query_key_padding_mask.size(3))) """memory encoding: context aggregation""" memory_outFeats =\ self.memory_net( { "src_feats": context_fusedFeats, "tgt_feats": query_fusedFeats, "src_key_padding_mask": context_key_padding_mask, "tgt_key_padding_mask": query_key_padding_mask, "memory_key_padding_mask": memory_key_padding_mask, } ) # max_query_length x B x ... -> B x max_query_length x ...; B: batch size, # max_query_length: transformer target sequence length T (https://pytorch.org/docs/1.4.0/nn.html#torch.nn.Transformer) memory_outFeats = memory_outFeats.permute(1, 0, 2) memory_outFeats = memory_outFeats.reshape((B, self.max_query_length, self._passive_mapping_cfg.PositionalNet.patch_hwCh[0], self._passive_mapping_cfg.PositionalNet.patch_hwCh[1], memory_outFeats.size(2))) memory_outFeats = memory_outFeats.reshape((B * self.max_query_length, self._passive_mapping_cfg.PositionalNet.patch_hwCh[0] *\ self._passive_mapping_cfg.PositionalNet.patch_hwCh[1] *\ memory_outFeats.size(4))) """query occMap decoder""" query_occMap_pred = self.query_occMap_dec({"memory_outFeats": memory_outFeats}) # (B * max_query_length) x ... 
-> B x max_query_length x ...; B: batch size, # max_query_length: transformer target sequence length T (https://pytorch.org/docs/1.4.0/nn.html#torch.nn.Transformer) query_occMap_pred = query_occMap_pred.reshape((B, self.max_query_length, *query_occMap_pred.size()[1:])) return query_occMap_pred class PassiveMappingPolicy(Policy): """ Model for passive mapping """ def __init__( self, cfg, ): passive_mapping_cfg = cfg.PassiveMapping task_cfg = cfg.TASK_CONFIG sim_cfg = task_cfg.SIMULATOR # --------------------------------------------- context encoders ----------------------------------------------- """pose net""" pose_net = PositionalNet( passive_mapping_cfg=passive_mapping_cfg, ) patchPose_net = PatchPositionalNet( passive_mapping_cfg=passive_mapping_cfg, ) """modality tag type lookup table""" modality_tag_type_lookup_dict = ModalityTagTypeNet( n_modality_tag_types=3, passive_mapping_cfg=passive_mapping_cfg, ) """views encoder""" context_views_enc = VisualEnc( cfg=cfg, ) """audio encoder""" context_audio_enc = AudioEnc( cfg=cfg, ) """fusion net"""
fusion_net = FusionNet()
6
2023-12-06 01:20:37+00:00
12k
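The `PositionalNet`/`PatchPositionalNet` modules in this record build classic sinusoidal features: each scalar position is multiplied by a bank of paired frequencies `MIN_FREQ ** (2 * (k // 2) / K)`, then cosine is applied at even indices and sine at odd indices. The sketch below reproduces that encoding for a batch of scalar positions; the `MIN_FREQ` value is an assumption (the snippet only references a module-level constant), and the frequency count is illustrative.

```python
import torch

MIN_FREQ = 1e-4  # assumed value; the record only shows MIN_FREQ as a module-level constant


def sinusoidal_features(positions: torch.Tensor, num_freqs: int) -> torch.Tensor:
    """positions: (B,) scalar positions -> (B, num_freqs) sinusoidal features."""
    k = torch.arange(num_freqs, dtype=torch.float32)
    freqs = MIN_FREQ ** (2 * (k // 2) / num_freqs)   # paired frequencies, as in PositionalNet
    scaled = positions.unsqueeze(-1) * freqs          # (B, num_freqs)
    feats = scaled.clone()
    feats[..., ::2] = torch.cos(scaled[..., ::2])     # cosine on even indices
    feats[..., 1::2] = torch.sin(scaled[..., 1::2])   # sine on odd indices
    return feats


# Example: encode 3 scalar positions with 8 frequencies each.
print(sinusoidal_features(torch.tensor([0.0, 1.0, 2.5]), num_freqs=8).shape)  # torch.Size([3, 8])
```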
PeriniM/Rotary-Pendulum-RL
control/reinforcement_learning/src/main.py
[ { "identifier": "RealPendulumEnv", "path": "control/reinforcement_learning/Environments/RealPendulumEnv.py", "snippet": "class RealPendulumEnv(gym.Env):\n \"\"\"\n Real rotary pendulum with ESP32\n \"\"\"\n\n metadata = {\"render_modes\": [\"human\"]}\n\n def __init__(self, port, baudrate, render_mode=\"human\"):\n super(RealPendulumEnv, self).__init__()\n \"\"\"\n Initialize the environment.\n \n Args:\n port (str): The serial port to connect to.\n baudrate (int): The baudrate to use for the serial connection.\n render_mode (str, optional): The render mode. Defaults to \"human\".\n\n Returns:\n None\n \"\"\"\n\n self.ser = serial.Serial(\n port=port,\n baudrate=baudrate,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS,\n timeout=1\n )\n self.reader = SerialReader(self.ser, simulation=False)\n self.reader.start()\n self.render_mode = render_mode\n self.name = \"RealPendulum\"\n self.nbJoint = 1\n self.num_state = 2\n self.action = 0.0\n self.motorAngle = 0.0\n self.terminated = False\n self.truncated = False\n self.iterCount = 0\n self.maxIter = 1000\n self.omega_max = 10.0\n self.range_actions = np.array([-1.0, 1.0])\n self.range_observation = np.array([-1.0, 1.0])\n self.observation_space = spaces.Box(low=self.range_observation[0], high=self.range_observation[1], shape=(self.num_state,), dtype=np.float32)\n self.action_space = spaces.Box(low=self.range_actions[0], high=self.range_actions[1], shape=(1,), dtype=np.float32)\n # variable to store angles of one episode\n self.episode_angles = []\n \n def reset(self, seed=None, options=None):\n \"\"\"\n Reset the environment to the initial state.\n\n Args:\n None\n\n Returns:\n state (np.array): [bar angle, bar angular velocity]\n info (dict): Episode information\n \"\"\"\n\n super().reset(seed=seed, options=options)\n\n # Reset the episode angles\n self.episode_angles = []\n\n # Send command to pendulum to go to home position.\n self.send_serial(\"0,1\")\n # Wait for the pendulum to report it has finished resetting.\n while (1):\n self.observation_space, self.motorAngle, self.terminated = self.reader.get_state()\n if not self.terminated:\n break\n\n # Reset iteration count\n self.iterCount = 0\n self.info = {\"episode\": {\"r\": 0.0, \"l\": self.iterCount}}\n\n return self.observation_space.astype(np.float32), self.info\n \n def step(self, action):\n \"\"\"\n Take a step in the environment\n\n Args:\n action (float): Motor speed percentage [-100, 100]\n\n Returns:\n state (np.array): [bar angle, bar angular velocity]\n reward (float): Reward for the current state\n terminated (bool): Whether the episode is done or not\n truncated (bool): Whether the episode is truncated or not\n info (dict): Episode information\n \"\"\"\n\n # Send action to pendulum over serial\n self.send_serial(f\"{action*100},0\")\n self.action = action\n # Read state and episode done flag from serial\n self.observation_space, self.motorAngle, self.terminated = self.reader.get_state()\n\n # Store the angles of the episode for reward penalty\n self.episode_angles.append(self.state[0])\n \n # Calculate reward\n reward = self.calculate_reward(self.observation_space)\n self.episode_reward += reward\n self.iterCount += 1\n self.reset_policy(self.maxIter)\n self.info = {\"episode\": {\"r\": self.episode_reward, \"l\": self.iterCount}}\n\n return self.observation_space.astype(np.float32), reward, self.terminated, self.truncated, self.info\n\n def send_serial(self, command):\n \"\"\"\n Send a command to the pendulum over serial\n\n 
Args:\n command (str): [motor speed percentage, reset flag]\n\n Returns:\n None\n \"\"\"\n\n self.ser.write(f\"{command}\\n\".encode())\n # time.sleep(0.1)\n \n def reset_policy(self, reset_count=200):\n \"\"\"\n Policy to reset the environment\n\n Args:\n reset_count (int, optional): Number of iterations to wait before resetting the system. Defaults to 200.\n \n Returns:\n None\n \"\"\"\n\n if self.iterCount > reset_count:\n self.terminated = True\n \n def calculate_reward(self, state):\n \"\"\"\n Calculate the reward for the current state\n\n Args:\n state (np.array): [bar angle, bar angular velocity]\n\n Returns:\n reward (float): Reward for the current state\n \"\"\"\n\n # Constants to scale the angle and velocity penalties\n ANGLE_WEIGHT = 1.0\n VELOCITY_WEIGHT = 0.1\n MOTOR_ANGLE_WEIGHT = 1.0\n ACTION_WEIGHT = 0.01\n\n # Penalize the angle to be minimized\n angle_penalty = ANGLE_WEIGHT * (state[0] ** 2)\n # Penalize the angular velocity to be minimized\n velocity_penalty = VELOCITY_WEIGHT * (state[1] ** 2)\n\n # Penalize the motor angle to be minimized\n motor_angle = self.motorAngle / 180.0\n motor_angle_penalty = MOTOR_ANGLE_WEIGHT * (motor_angle ** 2)\n\n # Penalize the action to be minimized\n action_penalty = ACTION_WEIGHT * (self.action ** 2)\n\n # Reward is higher when penalties are lower\n reward = -(angle_penalty + velocity_penalty + motor_angle_penalty + action_penalty)\n\n # Penalize the reward if the average angle of the episode is close to pi\n # after 3/4 of the maximum iterations\n if self.iterCount > self.maxIter*3/4:\n if np.abs(np.mean(self.episode_angles)) < (np.pi-0.8):\n reward-=100.0\n # if self.terminated:\n # if self.iterCount < self.maxIter*1/10:\n # reward-=100.0\n return reward\n\n def render(self, camera=False):\n \"\"\"\n Render the state (optional), e.g. display the video stream\n \"\"\"\n if camera:\n print(\"Connect the camera to the pendulum and display the video stream.\")\n\n def close(self):\n \"\"\"\n Close the serial connection\n\n Args:\n None\n\n Returns:\n None\n \"\"\"\n\n self.ser.close()" }, { "identifier": "PyBulletPendulumEnv", "path": "control/reinforcement_learning/Environments/PyBulletPendulumEnv.py", "snippet": "class PyBulletPendulumEnv(gym.Env):\n \"\"\"\n PyBullet Rotary Pendulum\n \"\"\"\n\n metadata = {\"render_modes\": [\"human\"]}\n\n def __init__(self, render_mode=\"human\"):\n super(PyBulletPendulumEnv, self).__init__()\n \"\"\"\n Initialize the PyBullet Rotary Pendulum environment\n\n Args:\n render (bool, optional): Whether to render the environment. 
Defaults to True.\n\n Returns:\n None\n \"\"\"\n\n self.render_mode = render_mode\n # Initialize PyBullet\n if render_mode == \"human\":\n self.physicsClient = p.connect(p.GUI)\n else:\n self.physicsClient = p.connect(p.DIRECT)\n\n p.setAdditionalSearchPath(pybullet_data.getDataPath())\n p.setGravity(0, 0, -9.806)\n # move camera to focus on the robot\n p.resetDebugVisualizerCamera(cameraDistance=0.4, cameraYaw=0, cameraPitch=-30, cameraTargetPosition=[0,0,0.1])\n # Load the plane and pendulum URDF\n self.planeId = p.loadURDF(\"plane.urdf\")\n self.load_pendulum_urdf()\n\n # Define other environment parameters\n self.name = \"PyBulletPendulum\"\n self.nbJoint = 1\n self.num_state = 2\n self.action = 0.0\n self.n_actions = 101\n self.range_actions = np.array([-1.0, 1.0])\n self.range_observation = np.array([-1.0, 1.0])\n self.observation_space = spaces.Box(low=self.range_observation[0], high=self.range_observation[1], shape=(self.num_state,), dtype=np.float32)\n self.action_space = spaces.Box(low=self.range_actions[0], high=self.range_actions[1], shape=(1,), dtype=np.float32)\n self.motorAngle = 0.0\n self.terminated = False\n self.truncated = False\n self.info = {}\n self.iterCount = 0\n self.maxIter = 1500\n self.omega_max = 10.0\n self.episode_reward = 0.0\n \n # variable to store angles of one episode\n self.episode_angles = []\n\n def load_pendulum_urdf(self):\n \"\"\"\n Load the pendulum URDF into the environment.\n\n Args:\n None\n\n Returns:\n None\n \"\"\"\n\n cubeStartPos = [0, 0, 0]\n cubeStartOrientation = p.getQuaternionFromEuler([np.pi / 2, 0, 0])\n curr_dir = os.path.abspath(os.path.dirname(__file__))\n robot_urdf = 'Rotary_Pendulum_URDF.urdf'\n # Construct the path to the URDF file\n urdf_path = os.path.join(curr_dir, '..', '..', '..', 'simulation', 'urdf', robot_urdf)\n self.robotId = p.loadURDF(urdf_path, cubeStartPos, cubeStartOrientation,\n # flags=p.URDF_USE_INERTIA_FROM_FILE,\n useFixedBase=True\n )\n\n # Define joint indices as per your URDF structure\n self.motor_joint_idx = [p.getJointInfo(self.robotId, i)[1] for i in range(p.getNumJoints(self.robotId))].index(b'Revolute_3')\n self.bar_joint_idx = [p.getJointInfo(self.robotId, i)[1] for i in range(p.getNumJoints(self.robotId))].index(b'Revolute_5')\n\n # Define real robot parameters\n self.steps_per_rev = 3200\n self.max_speed_steps_per_sec = 4000.0\n # Calculate radians per step\n self.radians_per_step = (2 * np.pi) / self.steps_per_rev\n # Calculate max speed in radians per second [rad/s]\n self.max_motor_speed = self.max_speed_steps_per_sec * self.radians_per_step\n # Admissible motor angle range [deg]\n self.motor_angle_range = [-150, 150]\n self.out_of_range = False\n\n # Compensation angles for the URDF\n self.motor_compensation_angle = 0.400\n self.bar_compensation_angle = -0.264\n\n def reset(self, seed=None, options=None):\n \"\"\"\n Reset the environment to a random state\n\n Args:\n None\n\n Returns:\n state (np.array): [bar_angle, bar_angular_velocity]\n \"\"\"\n\n super().reset(seed=seed, options=options)\n # Reset the episode angles\n self.episode_angles = []\n self.episode_reward = 0.0\n self.terminated = False\n # Send command to pendulum to reset to random position\n self.send_fake_serial([0, 1])\n\n # get the state from the pendulum\n self.observation_space, self.motorAngle, self.terminated = self.get_state()\n\n # Reset iteration count\n self.iterCount = 0\n self.info = {\"episode\": {\"r\": 0.0, \"l\": self.iterCount}}\n \n return self.observation_space.astype(np.float32), self.info\n \n def 
step(self, action):\n \"\"\"\n Take a step in the environment\n\n Args:\n action (float): Motor speed percentage [-100, 100]\n\n Returns:\n state (np.array): [bar angle, bar angular velocity]\n \"\"\"\n\n # multiply the action by 100 to get the percentage\n self.action = action*100.0\n # Send action to pendulum over serial\n self.send_fake_serial([self.action, 0])\n # Read state and episode done flag from serial\n self.observation_space, self.motorAngle, self.terminated = self.get_state()\n\n # Store the angles of the episode for reward penalty\n self.episode_angles.append(self.observation_space[0])\n \n # Calculate reward\n reward = self.calculate_reward(self.observation_space)\n self.episode_reward += reward\n self.iterCount += 1\n self.reset_policy(self.maxIter)\n self.info = {\"episode\": {\"r\": self.episode_reward, \"l\": self.iterCount}}\n\n # return normalized_state, reward, self.done\n return self.observation_space.astype(np.float32), reward, self.terminated, self.truncated, self.info\n \n def calculate_reward(self, state):\n \"\"\"\n Calculate the reward for the current state\n\n Args:\n state (np.array): [bar angle, bar angular velocity]\n\n Returns:\n reward (float): Reward for the current state\n \"\"\"\n\n # Constants to scale the bar and motor angle penalties\n ANGLE_WEIGHT = 1.0\n VELOCITY_WEIGHT = 0.1\n MOTOR_ANGLE_WEIGHT = 0.001\n ACTION_WEIGHT = 0.001\n\n # Calculate the angle penalty\n angle_penalty = ANGLE_WEIGHT * (state[0]) ** 2\n # Calculate the velocity penalty\n velocity_penalty = VELOCITY_WEIGHT * (state[1]) ** 2\n # Calculate the motor angle penalty\n # motor_angle_penalty = MOTOR_ANGLE_WEIGHT * (self.motorAngle/self.motor_angle_range[1]) ** 2\n # Calculate the action penalty\n action_penalty = ACTION_WEIGHT * (self.action/100) ** 2\n\n # Calculate the reward\n reward = - (angle_penalty + velocity_penalty)\n\n # NEW REWARD FUNCTION\n # reward range [-1, 0]\n # angle_target = 0.0\n # angular_velocity_target = 0.0\n # motor_angle_target = 0.0\n\n # reward = -1/2 * (np.abs(state[0] - angle_target)/np.pi + np.abs(self.motorAngle - motor_angle_target)/self.motor_angle_range[1])\n # reward = - 1/2 * (np.abs(state[0] - angle_target) + np.abs(state[1] - angular_velocity_target))\n # if the episode is done with enough iterations\n # if self.iterCount > int(self.maxIter/2) and self.done:\n # # if the average of the bar angles is less than 90 degrees\n # if np.abs(np.mean(self.episode_angles)) < np.deg2rad(90):\n # reward += 100.0\n\n # if the episode is done with not enough iterations\n # if self.iterCount < int(self.maxIter/10) and self.terminated:\n # # if the motor angle is out of range\n # if self.out_of_range:\n # reward -= 2000.0\n \n return reward\n \n def reset_policy(self, reset_count=200):\n \"\"\"\n Policy to reset the environment\n\n Args:\n reset_count (int, optional): Number of iterations to wait before resetting the system. 
Defaults to 200.\n \n Returns:\n None\n \"\"\"\n\n if self.iterCount >= reset_count:\n self.terminated = True\n\n def send_fake_serial(self, command):\n \"\"\"\n Send a command to the pendulum, simulating a fake serial connection\n\n Args:\n command (list): [motor speed percentage, episode done flag]\n\n Returns:\n None\n \"\"\"\n\n motor_speed_percentage = command[0]\n episode_done = command[1]\n\n if episode_done:\n self.terminated = True\n self.reset_robot(mode=\"random\")\n else:\n self.terminated = False\n # Calculate the motor speed in steps per second\n motor_speed = motor_speed_percentage * self.max_motor_speed / 100.0\n # set the motor velocity\n p.setJointMotorControl2(bodyUniqueId=self.robotId,\n jointIndex=self.motor_joint_idx,\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=motor_speed,\n )\n\n # time.sleep(0.1)\n \n def get_state(self):\n \"\"\"\n Read the state from the pendulum, simulating a fake serial connection\n\n Args:\n None\n\n Returns:\n state (np.array): [bar angle, bar angular velocity]\n motor_angle (float): Motor angle in degrees\n done (bool): Episode done flag\n \"\"\"\n\n # Get the bar angle\n bar_angle = p.getJointState(self.robotId, self.bar_joint_idx)[0] + self.bar_compensation_angle\n # Get bar angular velocity\n bar_angular_velocity = p.getJointState(self.robotId, self.bar_joint_idx)[1]\n # Get the motor angle\n motor_angle = np.rad2deg(p.getJointState(self.robotId, self.motor_joint_idx)[0] + self.motor_compensation_angle)\n\n # Map the motor angle to the correct range\n if motor_angle > self.motor_angle_range[1] or motor_angle < self.motor_angle_range[0]:\n self.out_of_range = True\n else:\n self.out_of_range = False\n \n # Adjusting the bar angle to map correctly\n bar_angle = bar_angle % (2 * np.pi) # Normalize the angle to be within 0 to 2π\n if bar_angle > np.pi:\n bar_angle -= 2 * np.pi # Adjust angles greater than π to be between -π to π\n \n if bar_angle > 0:\n bar_angle = np.pi - bar_angle\n elif bar_angle < 0:\n bar_angle = -np.pi - bar_angle\n\n # round the states to 4 decimal places\n bar_angle = round(bar_angle/np.pi, 4)\n bar_angular_velocity = round(bar_angular_velocity/self.omega_max, 4)\n motor_angle = round(motor_angle, 4)\n\n return np.array([bar_angle, bar_angular_velocity]), motor_angle, self.out_of_range\n \n def reset_robot(self, mode=\"random\"):\n \"\"\"\n Reset the robot state\n\n Args:\n mode (str, optional): Mode to reset the robot. 
Defaults to \"random\".\n\n Returns:\n state (np.array): [bar angle, bar angular velocity]\n \"\"\"\n\n if mode == \"random\":\n # Reset the robot to a random position\n bar_angle = np.random.uniform(-np.pi, np.pi)\n bar_angular_velocity = np.random.uniform(-self.omega_max, self.omega_max)\n motor_angle = np.deg2rad(np.random.uniform(self.motor_angle_range[0], self.motor_angle_range[1]))\n\n # Set the robot to the random position\n p.resetJointState(self.robotId, self.bar_joint_idx, targetValue=bar_angle)\n # p.resetJointState(self.robotId, self.motor_joint_idx, targetValue=motor_angle)\n p.resetJointState(self.robotId, self.motor_joint_idx, targetValue=-self.motor_compensation_angle)\n # set bar velocity with no force\n p.setJointMotorControl2(bodyUniqueId=self.robotId,\n jointIndex=self.bar_joint_idx,\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=bar_angular_velocity,\n force=0\n )\n elif mode == \"home\":\n # Reset the robot to the home position\n p.resetJointState(self.robotId, self.bar_joint_idx, targetValue=-self.bar_compensation_angle)\n p.resetJointState(self.robotId, self.motor_joint_idx, targetValue=-self.motor_compensation_angle)\n\n # set bar velocity with no force\n p.setJointMotorControl2(bodyUniqueId=self.robotId,\n jointIndex=self.bar_joint_idx,\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=0,\n force=0\n )\n \n return self.get_state()[0]\n \n def render(self, fps=240.0):\n \"\"\"\n Render the pendulum in PyBullet\n\n Args:\n fps (float, optional): Number of frames per second. Defaults to 240.0.\n\n Returns:\n None\n \"\"\"\n p.stepSimulation()\n if self.render_mode == \"human\":\n time.sleep(1./fps)\n \n def close(self):\n \"\"\"\n Close the PyBullet connection\n\n Args:\n None\n\n Returns:\n None\n \"\"\"\n p.disconnect()" }, { "identifier": "FakeEnv", "path": "control/reinforcement_learning/Environments/FakeEnv.py", "snippet": "class FakeEnv:\n \"\"\"\n Fake environment for testing purposes\n \"\"\"\n def __init__(self, nbJoint=1):\n \"\"\"\n Initialize the fake environment.\n\n Args:\n nbJoint (int): The number of joints to simulate.\n\n Returns:\n None\n \"\"\"\n\n self.name = \"FakeEnv\"\n self.nbJoint = nbJoint\n if nbJoint == 1:\n self.num_state = 2\n self.name = \"1-fakeenv\"\n else:\n self.num_state = 2*nbJoint\n self.name = str(nbJoint)+\"-fakeenv\"\n \n self.x = np.zeros(self.num_state)\n self.vmax = 8.0\n self.iterCount = 0\n self.maxIter = 1000\n self.range_actions = np.array([-100.0, 100.0])\n self.done = False\n\n def reset(self):\n \"\"\"\n Reset the environment to the initial state (random)\n\n Args:\n None\n\n Returns:\n state (np.array): list of joint angles and velocities\n \"\"\"\n\n for i in range(self.num_state):\n if i%2==0:\n self.x[i] = np.random.uniform(-np.pi, np.pi)\n else:\n self.x[i] = np.random.uniform(-self.vmax, self.vmax)\n self.iterCount = 0\n self.done = False\n return self.x\n \n def step(self, action):\n \"\"\"\n Take a step in the environment (random)\n\n Args:\n action (float): The action to take (it is not used)\n\n Returns:\n state (np.array): list of joint angles and velocities\n reward (float): The reward for the action taken (it is not used)\n done (bool): Whether the episode is done or not\n \"\"\"\n\n for i in range(self.num_state):\n if i%2==0:\n self.x[i] = np.random.uniform(-np.pi, np.pi)\n else:\n self.x[i] = np.random.uniform(-self.vmax, self.vmax)\n self.iterCount += 1\n if self.iterCount >= self.maxIter:\n self.done = True\n return self.x, np.random.uniform(-1, 1), self.done\n \n def render(self, 
debug=False):\n \"\"\"\n Print the current state\n\n Args:\n debug (bool): Whether to print the state or not\n\n Returns:\n None\n \"\"\"\n \n if debug:\n print(self.x)" }, { "identifier": "Agent", "path": "control/reinforcement_learning/DQN/Agent.py", "snippet": "class Agent:\n \"\"\"\n DQN Agent\n - Take an environment\n - Set up the deep neural network\n - Store the experience\n - Choose action\n - Train the network\n - Evaluate the network\n \"\"\"\n def __init__(self, env):\n\n # check if gpu is available\n if tf.config.list_physical_devices('GPU'):\n # print the device name\n print(\"GPU is available\")\n print(\"Device name: {}\".format(tf.test.gpu_device_name()))\n \n else:\n print(\"GPU is not available\")\n\n self.env = env\n \n self.nJoint = self.env.nbJoint\n \n # read INI file\n # get the path of the root directory\n root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n ini_file_path = os.path.join(root_dir, 'config.ini')\n self.params = self.parse_ini(ini_file_path)\n \n # set up the parameters from the INI file\n self.action_steps = int(self.params['action_steps'])\n self.torque_range = ast.literal_eval(self.params['control_range'])\n self.max_episode_steps = int(self.params['max_episode_steps'])\n self.train_episodes = int(self.params['train_episodes'])\n self.lr = float(self.params['lr'])\n self.discount_factor = float(self.params['discount_factor'])\n self.epsilon = float(self.params['epsilon'])\n self.epsilon_decay_episodes = int(self.params['epsilon_decay_episodes'])\n self.epsilon_final = float(self.params['epsilon_final'])\n self.buffer_size = int(self.params['buffer_size'])\n self.batch_size = int(self.params['batch_size'])\n self.hidden_dims = ast.literal_eval(self.params['hidden_dims'])\n self.update_rate_episodes = int(self.params['target_update_episodes'])\n self.train_rate_steps = int(self.params['train_rate_steps'])\n\n self.discounted_reward = 0.0\n self.epsilon_decay = (self.epsilon - self.epsilon_final) / self.epsilon_decay_episodes\n\n # set up the environment parameters\n self.env.num_actions = self.action_steps\n self.env.range_actions = self.torque_range\n self.env.maxIter = self.max_episode_steps\n self.env.umax = self.torque_range[1]\n self.env.actions = np.linspace(self.env.range_actions[0], self.env.range_actions[1], self.action_steps)\n self.env.action_space = [i for i in range(self.action_steps)]\n self.action_space = self.env.action_space\n\n self.total_step_counter = 0\n self.replay_buffer = ReplayBuffer(self.buffer_size)\n\n self.name_model = self.env.name + '_'+str(self.action_steps)+'_'+str(self.max_episode_steps)+'_' + \\\n str(self.epsilon_decay_episodes)+'_'+str(self.buffer_size)+'_'+str(self.batch_size)+'_'+ \\\n str(self.hidden_dims)+'_'+str(self.update_rate_episodes)+'_'+str(self.train_rate_steps)\n \n # path of the weights folder\n self.weights_folder = os.path.join(root_dir, 'saved_weights')\n self.final_weights_folder = os.path.join(root_dir, 'final_results/'+self.env.name)\n self.weights_name = ['dqn_weights_' + self.name_model +'.h5',\n 'dqn_target_weights_' + self.name_model +'.h5']\n \n # save metrics in a csv file (not used)\n self.metrics_folder = os.path.join(root_dir, 'saved_metrics')\n self.metrics_df = pd.DataFrame()\n self.metrics_name = ''\n\n # save the logs in a tensorboard file\n self.log_dir = os.path.join(root_dir, 'logs')\n self.writer = tf.summary.create_file_writer(os.path.join(self.log_dir, self.name_model))\n\n # create the deep neural network\n self.q_net = DeepQNetwork(self.lr, 
self.env.num_actions, self.env.num_state, self.hidden_dims , opt='adam', loss='mse')\n self.q_target_net = DeepQNetwork(self.lr, self.env.num_actions, self.env.num_state, self.hidden_dims, opt='adam', loss='mse')\n\n self.loss = 0.0\n self.current_episode = 0\n self.training_time = 0\n \n def policy(self, observation, type='epsilon_greedy'):\n \"\"\"\n Choose an action based on the policy\n \"\"\"\n if type == 'epsilon_greedy':\n if np.random.random() < self.epsilon:\n action = np.random.choice(self.action_space)\n else:\n action = np.argmax(self.q_net.predict(np.array([observation])))\n elif type == 'greedy':\n action = np.argmax(self.q_net.predict(np.array([observation])))\n elif type == 'random':\n action = np.random.choice(self.action_space)\n else:\n raise Exception(\"Unknown policy type\")\n \n return action\n \n def train(self):\n \"\"\"\n Train the network\n \"\"\"\n # check if the replay buffer has enough experiences\n if len(self.replay_buffer.gameplay_experiences) < self.batch_size:\n return\n \n # sample a batch of experiences\n states, actions, rewards, new_states, dones = self.replay_buffer.sample_batch(self.batch_size)\n \n # predict the q values of current states and next states\n q_predicted = self.q_net.predict(states)\n q_next = self.q_target_net.predict(new_states)\n # get the maximum q value of the next states\n q_max_next = np.max(q_next, axis=1)\n # copy the q values of the current states\n q_target = q_predicted.copy()\n \n for i in range(self.batch_size):\n # Q(s, a) = r + γ * max(Q(s', a')) * (1 - done)\n # if the next state is terminal, then the q value is just the reward\n # otherwise, estimate the q value using the target network\n q_target[i, actions[i]] = rewards[i] + self.discount_factor * q_max_next[i] * (1 - dones[i])\n \n # train the network in batches\n self.loss = self.q_net.train_on_batch(states, q_target)\n # self.loss = self.q_net.train_batch_gradientTape(states, q_target)\n \n def train_model(self, render=True, plot=True, verbose=False, soft_start=False):\n \"\"\"\n Train the model for a number of episodes and plot the reward\n \"\"\"\n\n # start from existing weights\n if soft_start:\n # load the weights\n self.q_net.model.load_weights(os.path.join(self.weights_folder, self.weights_name[0]))\n self.q_target_net.model.load_weights(os.path.join(self.weights_folder, self.weights_name[1]))\n\n start_training_time = time.time()\n\n # train the network for a number of episodes\n for episode in range(self.train_episodes):\n observation, _ = self.env.reset()\n done = False\n self.discounted_reward = 0.0\n episode_steps = 0\n self.loss = 0.0\n self.current_episode = episode\n\n # while the episode is not done\n while not done:\n if render:\n self.env.render()\n\n # copy of the observation to store in the replay buffer\n # because when passing the env reference, the old observation gets overwritten\n observation_copy = copy.copy(observation)\n action = self.policy(observation, 'epsilon_greedy')\n new_observation, reward, done, _, _ = self.env.step(self.env.actions[action])\n new_observation_copy = copy.copy(new_observation)\n self.discounted_reward += self.discount_factor**episode_steps * reward\n \n # store the experience in the replay buffer\n self.replay_buffer.store_tuple(observation_copy, action, reward, new_observation_copy, done)\n observation = new_observation_copy\n\n # train the network every train_rate_steps\n if self.total_step_counter % self.train_rate_steps == 0 or done:\n self.train()\n \n self.total_step_counter += 1\n episode_steps += 1\n \n 
# update the epsilon\n self.epsilon = self.epsilon - self.epsilon_decay if self.epsilon > self.epsilon_final else self.epsilon_final\n # update the target network\n if (episode+1) % self.update_rate_episodes == 0:\n self.q_target_net.model.set_weights(self.q_net.model.get_weights()) \n # save the weights every 10 episodes\n if episode % 10 == 0:\n self.q_net.model.save_weights(os.path.join(self.weights_folder, self.weights_name[0]))\n self.q_target_net.model.save_weights(os.path.join(self.weights_folder, self.weights_name[1]))\n # clear the session to avoid memory leaks\n K.clear_session() \n \n # save the metrics to tensorboard\n with self.writer.as_default():\n tf.summary.scalar('loss', self.loss, step=episode)\n tf.summary.scalar('epsilon', self.epsilon, step=episode)\n tf.summary.scalar('reward', self.discounted_reward, step=episode)\n tf.summary.scalar('episode_steps', episode_steps, step=episode)\n self.writer.flush()\n \n self.training_time = time.time() - start_training_time\n print(\"Training time: {}\".format(self.training_time))\n \n def evaluate_model(self, episodes, swingUp=False, render=True, plot=True, verbose=False, final=False):\n \"\"\"\n Evaluate the model for a number of episodes\n \"\"\"\n\n # load the weights from the final results folder\n if final:\n self.q_net.model.load_weights(os.path.join(self.final_weights_folder, self.weights_name[0]))\n self.q_target_net.model.load_weights(os.path.join(self.final_weights_folder, self.weights_name[1]))\n else:\n self.q_net.model.load_weights(os.path.join(self.weights_folder, self.weights_name[0]))\n self.q_target_net.model.load_weights(os.path.join(self.weights_folder, self.weights_name[1]))\n\n theta_list = []\n theta_dot_list = []\n torque_list = []\n\n for episode in range(episodes):\n\n # set the environment to the initial state (theta=0, theta_dot=0)\n if swingUp:\n # observation = self.env.reset_swingUp()\n observation = self.env.reset_robot(mode=\"home\")\n else:\n observation = self.env.reset()\n done = False\n episode_reward = 0\n\n # evaluate the model for a number of steps\n while not done:\n if render:\n self.env.render()\n \n action = self.policy(observation, 'greedy')\n new_observation, reward, done = self.env.step(self.env.actions[action])\n new_observation_copy = copy.copy(new_observation)\n episode_reward += reward\n \n observation = new_observation_copy\n \n # append the angle, angular velocity and torque to the lists\n if self.nJoint == 1:\n theta_list.append(observation[0])\n theta_dot_list.append(observation[1])\n torque_list.append(self.env.actions[action])\n else:\n theta_list.append([observation[0], observation[2]])\n theta_dot_list.append([observation[1], observation[3]])\n torque_list.append([self.env.actions[action], 0.0])\n\n if verbose:\n print(\"Episode: {}, Step: {}, Reward: {}\".format(episode, self.env.iterCount, episode_reward))\n \n if plot:\n # plot the angle, angular velocity and torque using sns\n sns.set()\n # plot the angle\n # if the pendulum is single\n if self.nJoint == 1:\n # plot the angles\n plt.plot(theta_list)\n plt.xlabel(\"Steps\")\n plt.ylabel(\"Angle\")\n plt.legend([\"q\"])\n plt.title(\"Swing Up Angle\")\n plt.show()\n # plot the angular velocities\n plt.plot(theta_dot_list)\n plt.xlabel(\"Steps\")\n plt.ylabel(\"Angular Velocity\")\n plt.legend([\"dq\"])\n plt.title(\"Swing Up Angular Velocity\")\n plt.show()\n # plot the torques\n plt.plot(torque_list)\n plt.xlabel(\"Steps\")\n plt.ylabel(\"Torque\")\n plt.legend([\"tau\"])\n plt.title(\"Swing Up Torque\")\n plt.show()\n 
# if the pendulum is double\n else:\n # plot the angles\n plt.plot(theta_list)\n plt.xlabel(\"Steps\")\n plt.ylabel(\"Angles\")\n plt.legend([\"q1\", \"q2\"])\n plt.title(\"Swing Up Angles\")\n plt.show()\n # plot the angular velocities\n plt.plot(theta_dot_list)\n plt.xlabel(\"Steps\")\n plt.ylabel(\"Angular Velocities\")\n plt.legend([\"dq1\", \"dq2\"])\n plt.title(\"Swing Up Angular Velocities\")\n plt.show()\n # plot the torques\n plt.plot(torque_list)\n plt.xlabel(\"Steps\")\n plt.ylabel(\"Torques\")\n plt.legend([\"tau1\", \"tau2\"])\n plt.title(\"Swing Up Torques\")\n plt.show()\n\n def plot_value_policy(self, visual='2D', resolution=10, final=False):\n \"\"\"\n Plot the value function and the policy of single pendulum\n \"\"\"\n # Load the weights from the final results folder\n if final:\n self.q_net.model.load_weights(os.path.join(self.final_weights_folder, self.weights_name[0]))\n self.q_target_net.model.load_weights(os.path.join(self.final_weights_folder, self.weights_name[1]))\n else:\n self.q_net.model.load_weights(os.path.join(self.weights_folder, self.weights_name[0]))\n self.q_target_net.model.load_weights(os.path.join(self.weights_folder, self.weights_name[1]))\n\n # Discretize the state space\n theta = np.linspace(-np.pi, np.pi, resolution)\n theta_dot = np.linspace(-self.env.vmax, self.env.vmax, resolution)\n\n # Create meshgrid\n theta_mesh, theta_dot_mesh = np.meshgrid(theta, theta_dot)\n\n # Initialize value function and policy arrays\n V = np.zeros_like(theta_mesh)\n P = np.zeros_like(theta_mesh)\n\n # Iterate over each state in the meshgrid\n for i in range(resolution):\n for j in range(resolution):\n state = np.array([theta_mesh[i, j], theta_dot_mesh[i, j]])\n state_tensor = tf.constant(state, dtype=tf.float32)\n q_values = self.q_net.model(state_tensor[None])[0]\n V[i, j] = tf.reduce_max(q_values)\n P[i, j] = tf.argmax(q_values)\n # map the action index to the action value\n P[i, j] = self.env.actions[int(P[i, j])]\n \n if visual=='3D':\n # Set the viewing angles\n elevation = 90 # Viewing angle from above\n azimuth = -90 # Rotate around the z-axis\n\n # Create 3D plots\n fig = plt.figure(figsize=(10, 5))\n ax1 = fig.add_subplot(121, projection='3d')\n value_surf = ax1.plot_surface(theta_mesh, theta_dot_mesh, V, cmap=cm.viridis)\n ax1.view_init(elevation, azimuth) # Set the viewing angles\n ax1.set_xlabel('q')\n ax1.set_ylabel('dq')\n ax1.set_zlabel('Value')\n ax1.set_title('Value Function')\n fig.colorbar(value_surf, shrink=0.5, aspect=5)\n\n ax2 = fig.add_subplot(122, projection='3d')\n policy_surf = ax2.plot_surface(theta_mesh, theta_dot_mesh, P, cmap=cm.Spectral)\n ax2.view_init(elevation, azimuth) # Set the viewing angles\n ax2.set_xlabel('q')\n ax2.set_ylabel('dq')\n ax2.set_zlabel('Action')\n ax2.set_title('Policy Function')\n fig.colorbar(policy_surf, shrink=0.5, aspect=5)\n else:\n # Set Seaborn style\n sns.set()\n\n # Create 2D plots with colormaps using Seaborn\n fig, axes = plt.subplots(1, 2, figsize=(10, 5))\n\n # Plot the value function\n ax1 = axes[0]\n sns.heatmap(V, cmap='viridis', ax=ax1, cbar=True)\n # set ticks as theta and theta_dot\n ax1.set_xticks(np.linspace(0, resolution, 5))\n ax1.set_xticklabels([-3, -1, 0, 1, 3])\n ax1.set_yticks(np.linspace(0, resolution, 5))\n ax1.set_yticklabels(np.linspace(-self.env.vmax, self.env.vmax, 5, dtype=int))\n ax1.set_xlabel('q')\n ax1.set_ylabel('dq')\n ax1.set_title('Value Function')\n\n # Plot the policy\n ax2 = axes[1]\n sns.heatmap(P, cmap='Spectral', ax=ax2, cbar=True)\n # set ticks as theta and 
theta_dot\n ax2.set_xticks(np.linspace(0, resolution, 5))\n ax2.set_xticklabels([-3, -1, 0, 1, 3])\n ax2.set_yticks(np.linspace(0, resolution, 5))\n ax2.set_yticklabels(np.linspace(-self.env.vmax, self.env.vmax, 5, dtype=int))\n ax2.set_xlabel('q')\n ax2.set_ylabel('dq')\n ax2.set_title('Policy Function')\n plt.tight_layout()\n\n plt.show()\n\n def parse_ini(self, ini_file):\n \"\"\"\n Parse the ini file with the env parameters\n \"\"\"\n config = configparser.ConfigParser()\n config.read(ini_file)\n\n if self.env.name == 'RealPendulum' or self.env.name == 'PyBulletPendulum' or self.env.name == '1-fakeenv':\n # parse the 'rotary_pendulum' section\n return config['rotary_pendulum']\n else:\n # raise an exception if the environment is not supported\n return Exception(\"Environment not supported\")\n \n def save_metrics(self, episode, episode_reward, last_loss, last_epsilon, episode_time):\n \"\"\"\n Save the metrics in a dataframe and export it to a csv file\n \"\"\"\n # if the dataframe is empty, create it\n if self.metrics_df.empty:\n self.metrics_df = pd.DataFrame(columns=['episode', 'reward', 'loss', 'epsilon', 'time'])\n timestamp_ep = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n self.metrics_name = self.name_model + '_' + timestamp_ep + '.csv'\n \n # append the metrics to the dataframe using iloc\n self.metrics_df.loc[len(self.metrics_df)] = [episode, episode_reward, last_loss, last_epsilon, episode_time]\n # export the dataframe to a csv file with timestamp\n self.metrics_df.to_csv(os.path.join(self.metrics_folder, self.metrics_name), index=False)" } ]
from ..Environments import RealPendulumEnv as real
from ..Environments import PyBulletPendulumEnv as pb
from ..Environments import FakeEnv as fake
from ..DQN.Agent import Agent
10,484
isFake = False
isPyBullet = True
isReal = False
train = True
plot_colormaps = False

# select the environment
if isFake:
    env = fake.FakeEnv(1)
elif isPyBullet:
    env = pb.PyBulletPendulumEnv(render_mode='human')
elif isReal:
    env = real.RealPendulumEnv("COM3", 115200)
else:
    raise Exception("No environment selected!")

# create the agent
isFake = False
isPyBullet = True
isReal = False
train = True
plot_colormaps = False

# select the environment
if isFake:
    env = fake.FakeEnv(1)
elif isPyBullet:
    env = pb.PyBulletPendulumEnv(render_mode='human')
elif isReal:
    env = real.RealPendulumEnv("COM3", 115200)
else:
    raise Exception("No environment selected!")

# create the agent
dqn_agent = Agent(env)
3
2023-12-09 11:22:54+00:00
12k
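Editorial note: the record above ends at its gold next line, dqn_agent = Agent(env). The sketch below shows, for illustration only, how that line plugs into the quoted Agent API. The flag values come from the record's own snippet, but everything after the agent's construction is an assumption composed from the method signatures visible in the quoted Agent class (train_model, evaluate_model, plot_value_policy) and is not part of the record.

    # Minimal sketch, not part of the record; the relative imports assume this
    # runs as a module inside the control/reinforcement_learning package.
    from ..Environments import PyBulletPendulumEnv as pb
    from ..DQN.Agent import Agent

    train = True            # flags as set in the record's cropped code
    plot_colormaps = False

    env = pb.PyBulletPendulumEnv(render_mode='human')
    dqn_agent = Agent(env)  # gold next line of this record

    if train:
        # assumed continuation; keywords match the quoted Agent.train_model signature
        dqn_agent.train_model(render=False, plot=True, verbose=False, soft_start=False)
    else:
        dqn_agent.evaluate_model(episodes=5, swingUp=True, render=True)

    if plot_colormaps:
        dqn_agent.plot_value_policy(visual='2D', resolution=100)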
JayYik/GAN_implementations
utils/get_model.py
[ { "identifier": "DCGAN", "path": "models/DCGAN.py", "snippet": "class DCGAN(nn.Module):\n def __init__(self, args):\n super(DCGAN, self).__init__()\n self.G=DCGAN_G(args.hw,args.z_dim,args.in_channels)\n self.D=DCGAN_D(args.hw,args.in_channels)\n # self.G.weight_init()\n # self.D.weight_init()\n\n self.args=args\n self.batch_size=args.batch_size\n self.z_dim=args.z_dim\n self.bce_loss = nn.BCELoss()\n\n self.optim_g = optim.Adam(self.G.parameters(), lr=args.lr_g,betas=args.betas)\n self.optim_d = optim.Adam(self.D.parameters(), lr=args.lr_d,betas=args.betas)\n self.scheduler_optim_g=optim.lr_scheduler.MultiStepLR(self.optim_g, milestones=[100,150], gamma=0.9)\n self.scheduler_optim_d=optim.lr_scheduler.LambdaLR(self.optim_d, lr_lambda=self.warm_up)\n \n # Recording program start time for log directory naming\n program_begin_time = t.strftime('%Y-%m-%d %H:%M', t.localtime())\n # Logging information\n self.information=f'DCGAN-{program_begin_time}'\n # TensorBoard SummaryWriter for logging\n self.writer=SummaryWriter(os.path.join(self.args.log_dir,self.information))\n\n def warm_up(self,epoch):\n \"\"\"\n Learning rate warm-up function for the Adam optimizer.\n\n Args:\n epoch (int): Current epoch number.\n\n Returns:\n float: Adjusted learning rate based on the warm-up strategy.\n \"\"\"\n top_epoch = int(self.args.num_epochs*0.3)\n if epoch<top_epoch:\n #In the first 30% of epochs, slowly increase the LR to the preset LR\n return (epoch+1) / top_epoch\n else:\n #Drop the LR to half of the preset\n return (1 -( 0.5 / (self.args.num_epochs - top_epoch) * (epoch - top_epoch) ) )\n\n def save_model(self,epoch):\n save_path=f'./save/{self.information}'\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n torch.save(self.G.state_dict(), f'{save_path}/generator_{epoch}epochs.pth')\n torch.save(self.D.state_dict(), f'{save_path}/discriminator_{epoch}epochs.pth')\n self.save_args(save_path)\n print(f'Models save to {save_path}/generator_{epoch}epochs.pth & {save_path}/discriminator_{epoch}epochs.pth ')\n\n def save_args(self,save_path):\n argsDict = self.args.__dict__\n with open(f'{save_path}/setting.txt', 'w') as f:\n f.writelines('------------------ start ------------------' + '\\n')\n for eachArg, value in argsDict.items():\n f.writelines(eachArg + ' : ' + str(value) + '\\n')\n f.writelines('------------------- end -------------------')\n\n\n def train(self,train_loader,device):\n \"\"\"\n Training function for the DCGAN model.\n\n Args:\n train_loader (DataLoader): DataLoader for training data.\n device (torch.device): The device (CPU or GPU) to perform training.\n\n Returns:\n None\n \"\"\"\n\n # Move the model and loss to the specified device\n self.G.to(device)\n self.D.to(device)\n self.bce_loss.to(device)\n generator_iter = 0\n descriminator_iter = 0\n \n # Training loop\n for epoch in range(self.args.num_epochs):\n self.t_begin = t.time()\n pbar=tqdm(enumerate(train_loader),total=len(train_loader),ncols=100)\n\n for i, (images, _) in pbar:\n if i == train_loader.dataset.__len__() // self.batch_size:\n break\n\n # Generate random noise and labels\n z = torch.randn((self.batch_size, self.z_dim, 1, 1))\n real_labels = torch.ones(self.batch_size)\n fake_labels = torch.zeros(self.batch_size)\n\n # Move data to the specified device\n images=images.to(device)\n z=z.to(device)\n real_labels=real_labels.to(device)\n fake_labels=fake_labels.to(device)\n\n # Train Discriminator\n real_output = self.D(images)\n #print('real_output:',real_output)\n fake_images = self.G(z)\n fake_output = 
self.D(fake_images)\n d_real_loss = self.bce_loss(real_output.flatten(), real_labels)\n d_fake_loss = self.bce_loss(fake_output.flatten(), fake_labels)\n #print('real_loss:',d_real_loss.item(),' fake_loss:',d_fake_loss.item())\n d_loss = d_real_loss + d_fake_loss\n self.D.zero_grad()\n d_loss.backward()\n self.writer.add_scalar('D_loss', d_loss.item(), descriminator_iter)\n self.optim_d.step()\n descriminator_iter+=1\n\n # Train Generator\n if i % self.args.des_iter == 0:\n #print(\"i:\",i)\n self.D.zero_grad()\n self.G.zero_grad()\n\n z = torch.randn((self.batch_size, self.z_dim, 1, 1))\n z = z.to(device)\n\n fake_images = self.G(z)\n fake_output = self.D(fake_images)\n fake_score = fake_output.squeeze().mean()\n #print('fake_output:',fake_output)\n g_loss = self.bce_loss(fake_output.flatten(), real_labels)\n g_loss.backward()\n pbar.set_postfix({'G_loss': g_loss.item(),'D_loss': d_loss.item(),'fake_socre':fake_score.item()})\n #print('g_loss:',g_loss.item())\n self.optim_g.step()\n self.writer.add_scalar('G_loss', g_loss.item(), generator_iter)\n generator_iter+=1\n \n # Save generated images\n if generator_iter % 500 == 0:\n \n if not os.path.exists(f'./training_result_{self.args.dataset}-{self.information}/'):\n os.makedirs(f'./training_result_{self.args.dataset}-{self.information}/')\n\n z = torch.randn((self.batch_size,self.args.z_dim, 1, 1))\n z=z.to(device)\n samples = self.G(z)\n samples = samples.mul(0.5).add(0.5)\n samples = samples.data.cpu()[:25]\n grid = torchvision.utils.make_grid(samples,nrow=5)\n torchvision.utils.save_image(grid, './training_result_{}/img_generatori_iter_{}.png'.format(self.args.dataset+'-'+self.information,str(generator_iter).zfill(3)))\n \n # Learning rate scheduling\n self.scheduler_optim_d.step()\n self.scheduler_optim_g.step()\n\n # Print and log training information\n print(self.optim_d.state_dict()['param_groups'][0]['lr'])\n print(self.optim_g.state_dict()['param_groups'][0]['lr'])\n self.t_end = t.time()\n print(\n \"[Epoch %d/%d] [D loss: %f] [G loss: %f] [training time: %.3fseconds]\"\n % (epoch, self.args.num_epochs, d_loss.item(), g_loss.item() , (self.t_end - self.t_begin))\n )\n\n # Save the trained parameters\n if epoch % (self.args.num_epochs // 5) == 0 and epoch !=0:\n self.save_model(epoch)" }, { "identifier": "GAN", "path": "models/GAN.py", "snippet": "class GAN(nn.Module):\n def __init__(self, args):\n super(GAN, self).__init__()\n self.G=GAN_G(args.hw,args.z_dim,args.in_channels)\n self.D=GAN_D(args.hw,args.in_channels)\n\n\n self.args=args\n self.batch_size=args.batch_size\n self.z_dim=args.z_dim\n self.bce_loss = nn.BCELoss()\n\n self.optim_g = optim.Adam(self.G.parameters(), lr=args.lr_g,betas=args.betas)\n self.optim_d = optim.Adam(self.D.parameters(), lr=args.lr_d,betas=args.betas)\n\n \n # Recording program start time for log directory naming\n program_begin_time = t.strftime('%Y-%m-%d %H:%M', t.localtime())\n # Logging information\n self.information=f'GAN-{program_begin_time}'\n # TensorBoard SummaryWriter for logging\n self.writer=SummaryWriter(os.path.join(self.args.log_dir,self.information))\n\n\n def save_model(self,epoch):\n save_path=f'./save/{self.information}'\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n torch.save(self.G.state_dict(), f'{save_path}/generator_{epoch}epochs.pth')\n torch.save(self.D.state_dict(), f'{save_path}/discriminator_{epoch}epochs.pth')\n self.save_args(save_path)\n print(f'Models save to {save_path}/generator_{epoch}epochs.pth & {save_path}/discriminator_{epoch}epochs.pth 
')\n\n def save_args(self,save_path):\n argsDict = self.args.__dict__\n with open(f'{save_path}/setting.txt', 'w') as f:\n f.writelines('------------------ start ------------------' + '\\n')\n for eachArg, value in argsDict.items():\n f.writelines(eachArg + ' : ' + str(value) + '\\n')\n f.writelines('------------------- end -------------------')\n\n\n def train(self,train_loader,device):\n \"\"\"\n Training function for the GAN model.\n\n Args:\n train_loader (DataLoader): DataLoader for training data.\n device (torch.device): The device (CPU or GPU) to perform training.\n\n Returns:\n None\n \"\"\"\n\n # Move the model and loss to the specified device\n self.G.to(device)\n self.D.to(device)\n self.bce_loss.to(device)\n generator_iter = 0\n descriminator_iter = 0\n \n # Training loop\n for epoch in range(self.args.num_epochs):\n self.t_begin = t.time()\n pbar=tqdm(enumerate(train_loader),total=len(train_loader),ncols=100)\n\n for i, (images, _) in pbar:\n if i == train_loader.dataset.__len__() // self.batch_size:\n break\n\n # Generate random noise and labels\n z = torch.randn((self.batch_size, self.z_dim))\n real_labels = torch.ones(self.batch_size)\n fake_labels = torch.zeros(self.batch_size)\n\n # Move data to the specified device\n images=images.to(device)\n z=z.to(device)\n real_labels=real_labels.to(device)\n fake_labels=fake_labels.to(device)\n\n # Train Discriminator\n real_output = self.D(images)\n #print('real_output:',real_output)\n fake_images = self.G(z)\n fake_output = self.D(fake_images)\n d_real_loss = self.bce_loss(real_output.flatten(), real_labels)\n d_fake_loss = self.bce_loss(fake_output.flatten(), fake_labels)\n #print('real_loss:',d_real_loss.item(),' fake_loss:',d_fake_loss.item())\n d_loss = d_real_loss + d_fake_loss\n self.D.zero_grad()\n d_loss.backward()\n self.writer.add_scalar('D_loss', d_loss.item(), descriminator_iter)\n self.optim_d.step()\n descriminator_iter+=1\n\n # Train Generator\n if i % self.args.des_iter == 0:\n #print(\"i:\",i)\n self.D.zero_grad()\n self.G.zero_grad()\n\n z = torch.randn((self.batch_size, self.z_dim))\n z = z.to(device)\n\n fake_images = self.G(z)\n fake_output = self.D(fake_images)\n fake_score = fake_output.squeeze().mean()\n #print('fake_output:',fake_output)\n g_loss = self.bce_loss(fake_output.flatten(), real_labels)\n g_loss.backward()\n pbar.set_postfix({'G_loss': g_loss.item(),'D_loss': d_loss.item(),'fake_socre':fake_score.item()})\n #print('g_loss:',g_loss.item())\n self.optim_g.step()\n self.writer.add_scalar('G_loss', g_loss.item(), generator_iter)\n generator_iter+=1\n \n # Save generated images\n if generator_iter % 500 == 0:\n \n if not os.path.exists(f'./training_result_{self.args.dataset}-{self.information}/'):\n os.makedirs(f'./training_result_{self.args.dataset}-{self.information}/')\n\n z = torch.randn((self.batch_size,self.args.z_dim))\n z=z.to(device)\n samples = self.G(z)\n samples = samples.mul(0.5).add(0.5)\n samples = samples.data.cpu()[:25]\n grid = torchvision.utils.make_grid(samples,nrow=5)\n torchvision.utils.save_image(grid, './training_result_{}/img_generatori_iter_{}.png'.format(self.args.dataset+'-'+self.information,str(generator_iter).zfill(3)))\n \n\n # Print and log training information\n print(self.optim_d.state_dict()['param_groups'][0]['lr'])\n print(self.optim_g.state_dict()['param_groups'][0]['lr'])\n self.t_end = t.time()\n print(\n \"[Epoch %d/%d] [D loss: %f] [G loss: %f] [training time: %.3fseconds]\"\n % (epoch, self.args.num_epochs, d_loss.item(), g_loss.item() , (self.t_end - 
self.t_begin))\n )\n\n # Save the trained parameters\n if epoch % (self.args.num_epochs // 5) == 0 and epoch !=0:\n self.save_model(epoch)" }, { "identifier": "WGAN_CP", "path": "models/WGAN.py", "snippet": "class WGAN_CP(nn.Module):\n def __init__(self, args):\n super(WGAN_CP, self).__init__()\n self.G=WGANCP_G(args.hw,args.z_dim,args.in_channels)\n self.D=WGANCP_D(args.hw,args.in_channels)\n # self.G.weight_init()\n # self.D.weight_init()\n\n self.args=args\n self.batch_size=args.batch_size\n self.z_dim=args.z_dim\n\n # Attention!!! WGAN use RMSprop optimizer instead of Adam\n self.optim_g = optim.RMSprop(self.G.parameters(), lr=args.lr_g)\n self.optim_d = optim.RMSprop(self.D.parameters(), lr=args.lr_d)\n self.scheduler_optim_g=optim.lr_scheduler.MultiStepLR(self.optim_g, milestones=[100,150], gamma=0.9)\n self.scheduler_optim_d=optim.lr_scheduler.LambdaLR(self.optim_d, lr_lambda=self.warm_up)\n \n # Recording program start time for log directory naming\n program_begin_time = t.strftime('%Y-%m-%d %H:%M', t.localtime())\n # Logging information\n self.information=f'WGAN-{program_begin_time}'\n # TensorBoard SummaryWriter for logging\n self.writer=SummaryWriter(os.path.join(self.args.log_dir,self.information))\n\n def warm_up(self,epoch):\n \"\"\"\n Learning rate warm-up function for the RMSprop optimizer.\n\n Args:\n epoch (int): Current epoch number.\n\n Returns:\n float: Adjusted learning rate based on the warm-up strategy.\n \"\"\"\n top_epoch = int(self.args.num_epochs*0.3)\n if epoch<top_epoch:\n #In the first 30% of epochs, slowly increase the LR to the preset LR\n return (epoch+1) / top_epoch\n else:\n #Drop the LR to half of the preset\n return (1 -( 0.5 / (self.args.num_epochs - top_epoch) * (epoch - top_epoch) ) )\n\n def save_model(self,epoch):\n save_path=f'./save/{self.information}'\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n torch.save(self.G.state_dict(), f'{save_path}/generator_{epoch}epochs.pth')\n torch.save(self.D.state_dict(), f'{save_path}/discriminator_{epoch}epochs.pth')\n self.save_args(save_path)\n print(f'Models save to {save_path}/generator_{epoch}epochs.pth & {save_path}/discriminator_{epoch}epochs.pth ')\n\n def save_args(self,save_path):\n argsDict = self.args.__dict__\n with open(f'{save_path}/setting.txt', 'w') as f:\n f.writelines('------------------ start ------------------' + '\\n')\n for eachArg, value in argsDict.items():\n f.writelines(eachArg + ' : ' + str(value) + '\\n')\n f.writelines('------------------- end -------------------')\n\n\n def train(self,train_loader,device):\n \"\"\"\n Training function for the WGAN model.\n\n Args:\n train_loader (DataLoader): DataLoader for training data.\n device (torch.device): The device (CPU or GPU) to perform training.\n\n Returns:\n None\n \"\"\"\n\n # Move the model and loss to the specified device\n self.G.to(device)\n self.D.to(device)\n generator_iter = 0\n descriminator_iter = 0\n \n # Training loop\n for epoch in range(self.args.num_epochs):\n self.t_begin = t.time()\n pbar=tqdm(enumerate(train_loader),total=len(train_loader),ncols=100)\n\n for i, (images, _) in pbar:\n if i == train_loader.dataset.__len__() // self.batch_size:\n break\n\n # Generate random noise and labels\n z = torch.randn((self.batch_size, self.z_dim, 1, 1))\n real_labels = torch.ones(self.batch_size)\n fake_labels = torch.zeros(self.batch_size)\n\n # Move data to the specified device\n images=images.to(device)\n z=z.to(device)\n real_labels=real_labels.to(device)\n fake_labels=fake_labels.to(device)\n\n # Train 
Discriminator\n for p in self.D.parameters():\n p.data.clamp_(-self.args.wc, self.args.wc)\n\n\n d_loss_real = self.D(images)\n d_loss_real = d_loss_real.mean(0).view(1)\n\n fake_images = self.G(z)\n d_loss_fake = self.D(fake_images)\n d_loss_fake = d_loss_fake.mean(0).view(1)\n\n d_loss = d_loss_fake - d_loss_real\n Wasserstein_D = d_loss_real - d_loss_fake\n self.D.zero_grad()\n d_loss.backward()\n self.writer.add_scalar('D_loss', d_loss.item(), descriminator_iter)\n self.writer.add_scalar('Wasserstein_D', Wasserstein_D.item(), descriminator_iter)\n self.optim_d.step()\n descriminator_iter+=1\n\n # Train Generator\n if i % self.args.des_iter == 0:\n #print(\"i:\",i)\n self.D.zero_grad()\n self.G.zero_grad()\n\n z = torch.randn((self.batch_size, self.z_dim, 1, 1))\n z = z.to(device)\n\n fake_images = self.G(z)\n \n #print('fake_output:',fake_output)\n g_loss = self.D(fake_images)\n g_loss = g_loss.mean(0).view(1).mul(-1)\n g_loss.backward()\n pbar.set_postfix({'G_loss': g_loss.item(),'D_loss': d_loss.item()})\n #print('g_loss:',g_loss.item())\n self.optim_g.step()\n self.writer.add_scalar('G_loss', g_loss.item(), generator_iter)\n generator_iter+=1\n \n # Save generated images\n if generator_iter % 500 == 0:\n \n if not os.path.exists(f'./training_result_{self.args.dataset}-{self.information}/'):\n os.makedirs(f'./training_result_{self.args.dataset}-{self.information}/')\n\n z = torch.randn((self.batch_size,self.args.z_dim, 1, 1))\n z=z.to(device)\n samples = self.G(z)\n samples = samples.mul(0.5).add(0.5)\n samples = samples.data.cpu()[:25]\n grid = torchvision.utils.make_grid(samples,nrow=5)\n torchvision.utils.save_image(grid, './training_result_{}/img_generatori_iter_{}.png'.format(self.args.dataset+'-'+self.information,str(generator_iter).zfill(3)))\n \n # Learning rate scheduling\n self.scheduler_optim_d.step()\n self.scheduler_optim_g.step()\n\n # Print and log training information\n print(self.optim_d.state_dict()['param_groups'][0]['lr'])\n print(self.optim_g.state_dict()['param_groups'][0]['lr'])\n self.t_end = t.time()\n print(\n \"[Epoch %d/%d] [D loss: %f] [G loss: %f] [training time: %.3fseconds]\"\n % (epoch, self.args.num_epochs, d_loss.item(), g_loss.item() , (self.t_end - self.t_begin))\n )\n\n # Save the trained parameters\n if epoch % (self.args.num_epochs // 5) == 0 and epoch !=0:\n self.save_model(epoch)" }, { "identifier": "WGAN_GP", "path": "models/WGAN_GP.py", "snippet": "class WGAN_GP(nn.Module):\n def __init__(self, args):\n super(WGAN_GP, self).__init__()\n self.G=WGANGP_G(args.hw,args.z_dim,args.in_channels)\n self.D=WGANGP_D(args.hw,args.in_channels)\n # self.G.weight_init()\n # self.D.weight_init()\n\n self.args=args\n self.batch_size=args.batch_size\n self.z_dim=args.z_dim\n self.gp_lambda=args.gp_lambda\n\n \n self.optim_g = optim.Adam(self.G.parameters(), lr=args.lr_g, betas=args.betas)\n self.optim_d = optim.Adam(self.D.parameters(), lr=args.lr_d, betas=args.betas)\n self.scheduler_optim_g=optim.lr_scheduler.MultiStepLR(self.optim_g, milestones=[100,150], gamma=0.95)\n self.scheduler_optim_d=optim.lr_scheduler.LambdaLR(self.optim_d, lr_lambda=self.warm_up)\n \n # Recording program start time for log directory naming\n program_begin_time = t.strftime('%Y-%m-%d %H:%M', t.localtime())\n # Logging information\n self.information=f'WGAN_GP-{program_begin_time}'\n # TensorBoard SummaryWriter for logging\n self.writer=SummaryWriter(os.path.join(self.args.log_dir,self.information))\n\n def warm_up(self,epoch):\n \"\"\"\n Learning rate warm-up function for the 
Adam optimizer.\n\n Args:\n epoch (int): Current epoch number.\n\n Returns:\n float: Adjusted learning rate based on the warm-up strategy.\n \"\"\"\n top_epoch = int(self.args.num_epochs*0.3)\n if epoch<top_epoch:\n #In the first 30% of epochs, slowly increase the LR to the preset LR\n return (epoch+1) / top_epoch\n else:\n #Drop the LR to half of the preset\n return (1 -( 0.5 / (self.args.num_epochs - top_epoch) * (epoch - top_epoch) ) )\n\n def save_model(self,epoch):\n save_path=f'./save/{self.information}'\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n torch.save(self.G.state_dict(), f'{save_path}/generator_{epoch}epochs.pth')\n torch.save(self.D.state_dict(), f'{save_path}/discriminator_{epoch}epochs.pth')\n self.save_args(save_path)\n print(f'Models save to {save_path}/generator_{epoch}epochs.pth & {save_path}/discriminator_{epoch}epochs.pth ')\n\n def save_args(self,save_path):\n argsDict = self.args.__dict__\n with open(f'{save_path}/setting.txt', 'w') as f:\n f.writelines('------------------ start ------------------' + '\\n')\n for eachArg, value in argsDict.items():\n f.writelines(eachArg + ' : ' + str(value) + '\\n')\n f.writelines('------------------- end -------------------')\n\n\n def train(self,train_loader,device):\n \"\"\"\n Training function for the WGAN-GP model.\n\n Args:\n train_loader (DataLoader): DataLoader for training data.\n device (torch.device): The device (CPU or GPU) to perform training.\n\n Returns:\n None\n \"\"\"\n\n # Move the model and loss to the specified device\n self.G.to(device)\n self.D.to(device)\n generator_iter = 0\n descriminator_iter = 0\n \n # Training loop\n for epoch in range(self.args.num_epochs):\n self.t_begin = t.time()\n pbar=tqdm(enumerate(train_loader),total=len(train_loader),ncols=100)\n\n for i, (images, _) in pbar:\n if i == train_loader.dataset.__len__() // self.batch_size:\n break\n for p in self.D.parameters():\n p.requires_grad = True\n # Generate random noise and labels\n z = torch.randn((self.batch_size, self.z_dim, 1, 1))\n real_labels = torch.ones(self.batch_size)\n fake_labels = torch.zeros(self.batch_size)\n\n # Move data to the specified device\n images=images.to(device)\n z=z.to(device)\n real_labels=real_labels.to(device)\n fake_labels=fake_labels.to(device)\n\n # Train Discriminator\n d_loss_real = self.D(images)\n d_loss_real = d_loss_real.mean(0).view(1)\n\n fake_images = self.G(z)\n d_loss_fake = self.D(fake_images)\n d_loss_fake = d_loss_fake.mean(0).view(1)\n\n gradient_penalty = self.calculate_gradient_penalty(images.data, fake_images.data,device)\n\n d_loss = d_loss_fake - d_loss_real + gradient_penalty\n Wasserstein_D = d_loss_real - d_loss_fake\n self.D.zero_grad()\n d_loss.backward()\n self.writer.add_scalar('D_loss', d_loss.item(), descriminator_iter)\n self.writer.add_scalar('Wasserstein_D', Wasserstein_D.item(), descriminator_iter)\n self.optim_d.step()\n descriminator_iter+=1\n\n # Train Generator\n if i % self.args.des_iter == 0:\n \n for p in self.D.parameters():\n p.requires_grad = False # to avoid computation\n\n #print(\"i:\",i)\n self.D.zero_grad()\n self.G.zero_grad()\n\n z = torch.randn((self.batch_size, self.z_dim, 1, 1))\n z = z.to(device)\n\n fake_images = self.G(z)\n \n #print('fake_output:',fake_output)\n g_loss = self.D(fake_images)\n g_loss = g_loss.mean(0).view(1).mul(-1)\n g_loss.backward()\n pbar.set_postfix({'G_loss': g_loss.item(),'D_loss': d_loss.item()})\n #print('g_loss:',g_loss.item())\n self.optim_g.step()\n self.writer.add_scalar('G_loss', g_loss.item(), 
generator_iter)\n generator_iter+=1\n \n # Save generated images\n if generator_iter % 500 == 0:\n \n if not os.path.exists(f'./training_result_{self.args.dataset}-{self.information}/'):\n os.makedirs(f'./training_result_{self.args.dataset}-{self.information}/')\n\n z = torch.randn((self.batch_size,self.args.z_dim, 1, 1))\n z=z.to(device)\n samples = self.G(z)\n samples = samples.mul(0.5).add(0.5)\n samples = samples.data.cpu()[:25]\n grid = torchvision.utils.make_grid(samples,nrow=5)\n torchvision.utils.save_image(grid, './training_result_{}/img_generatori_iter_{}.png'.format(self.args.dataset+'-'+self.information,str(generator_iter).zfill(3)))\n \n # Learning rate scheduling\n self.scheduler_optim_d.step()\n self.scheduler_optim_g.step()\n\n # Print and log training information\n print(self.optim_d.state_dict()['param_groups'][0]['lr'])\n print(self.optim_g.state_dict()['param_groups'][0]['lr'])\n self.t_end = t.time()\n print(\n \"[Epoch %d/%d] [D loss: %f] [G loss: %f] [training time: %.3fseconds]\"\n % (epoch, self.args.num_epochs, d_loss.item(), g_loss.item() , (self.t_end - self.t_begin))\n )\n\n # Save the trained parameters\n if epoch % (self.args.num_epochs // 5) == 0 and epoch !=0:\n self.save_model(epoch)\n\n\n def calculate_gradient_penalty(self, real_images, fake_images, device):\n eta = torch.FloatTensor(self.batch_size,1,1,1).uniform_(0,1)\n eta = eta.expand(self.batch_size, real_images.size(1), real_images.size(2), real_images.size(3))\n eta=eta.to(device)\n\n interpolated = eta * real_images + ((1 - eta) * fake_images)\n interpolated.requires_grad_(True)\n\n # calculate probability of interpolated examples\n prob_interpolated = self.D(interpolated)\n\n grad_outputs=torch.ones(prob_interpolated.size()).to(device)\n grad_outputs.requires_grad_(True)\n # calculate gradients of probabilities with respect to examples\n gradients = autograd.grad(outputs=prob_interpolated, inputs=interpolated,\n grad_outputs=grad_outputs,\n create_graph=True, retain_graph=True)[0]\n\n grad_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * self.gp_lambda\n return grad_penalty" } ]
import torch
from models.DCGAN import DCGAN
from models.GAN import GAN
from models.WGAN import WGAN_CP
from models.WGAN_GP import WGAN_GP
7,217
def get_model(args):
    if args.model == 'DCGAN':
        net=DCGAN(args)
    elif args.model == 'GAN':
        net=GAN(args)
    elif args.model == 'WGAN-CP':
        net=WGAN_CP(args)
    elif args.model == 'WGAN-GP':
def get_model(args):
    if args.model == 'DCGAN':
        net=DCGAN(args)
    elif args.model == 'GAN':
        net=GAN(args)
    elif args.model == 'WGAN-CP':
        net=WGAN_CP(args)
    elif args.model == 'WGAN-GP':
net=WGAN_GP(args)
3
2023-12-12 06:24:31+00:00
12k
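Editorial note: for readability, the dispatch function of this record is shown below completed with its gold next line, net=WGAN_GP(args). The fallback branch and the return statement are assumptions added for illustration; the record itself ends at the gold line.

    # Minimal sketch of the completed utils/get_model.py fragment (assumptions marked).
    from models.DCGAN import DCGAN
    from models.GAN import GAN
    from models.WGAN import WGAN_CP
    from models.WGAN_GP import WGAN_GP

    def get_model(args):
        if args.model == 'DCGAN':
            net = DCGAN(args)
        elif args.model == 'GAN':
            net = GAN(args)
        elif args.model == 'WGAN-CP':
            net = WGAN_CP(args)
        elif args.model == 'WGAN-GP':
            net = WGAN_GP(args)  # gold next line of this record
        else:
            # assumed fallback, not present in the record
            raise ValueError('Unsupported model: {}'.format(args.model))
        return net  # assumed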
tommy-xq/SA2VP
vpt_main/src/models/build_model.py
[ { "identifier": "ResNet", "path": "vpt_main/src/models/resnet.py", "snippet": "class ResNet(nn.Module):\n \"\"\"ResNet model.\"\"\"\n\n def __init__(self, cfg):\n super(ResNet, self).__init__()\n self.cfg = cfg\n\n model_type = cfg.DATA.FEATURE\n model = self.get_pretrained_model(model_type)\n\n if \"prompt\" in cfg.MODEL.TRANSFER_TYPE:\n # setup prompt_embd and modify model accordingly\n model = self.setup_prompt(cfg.MODEL.PROMPT, model)\n else:\n self.prompt_embeddings = None\n\n # setup side network if needed\n self.setup_side()\n # set which parameters require grad\n # creat self.prompt_layers, self.frozen_layers, self.tuned_layers\n self.setup_grad(model)\n # create self.head\n self.setup_head(cfg)\n\n def setup_side(self):\n if self.cfg.MODEL.TRANSFER_TYPE != \"side\":\n self.side = None\n else:\n self.side_alpha = nn.Parameter(torch.tensor(0.0))\n out_dim = self.get_outputdim()\n m = models.alexnet(pretrained=True)\n self.side = nn.Sequential(OrderedDict([\n (\"features\", m.features),\n (\"avgpool\", m.avgpool),\n ]))\n self.side_projection = nn.Linear(9216, out_dim, bias=False)\n\n def setup_grad(self, model):\n transfer_type = self.cfg.MODEL.TRANSFER_TYPE\n # split enc into 3 parts:\n # prompt_layers frozen_layers tuned_layers\n # partial-1 identity -layer3 layer4\n # partial-2: identity -layer2 \"layer4\" \"layer3\"\n # partial-3: identity -layer1 \"layer4\" \"layer3\" \"layer2\"\n # linear identity all identity\n # end2end identity identity all\n\n # prompt-below conv1 all but conv1\n # prompt-pad identity all\n\n if transfer_type == \"prompt\" and self.cfg.MODEL.PROMPT.LOCATION == \"below\": # noqa\n self.prompt_layers = nn.Sequential(OrderedDict([\n (\"conv1\", model.conv1),\n (\"bn1\", model.bn1),\n (\"relu\", model.relu),\n (\"maxpool\", model.maxpool),\n ]))\n self.frozen_layers = nn.Sequential(OrderedDict([\n (\"layer1\", model.layer1),\n (\"layer2\", model.layer2),\n (\"layer3\", model.layer3),\n (\"layer4\", model.layer4),\n (\"avgpool\", model.avgpool),\n ]))\n self.tuned_layers = nn.Identity()\n else:\n # partial, linear, end2end, prompt-pad\n self.prompt_layers = nn.Identity()\n\n if transfer_type == \"partial-0\":\n # last conv block\n self.frozen_layers = nn.Sequential(OrderedDict([\n (\"conv1\", model.conv1),\n (\"bn1\", model.bn1),\n (\"relu\", model.relu),\n (\"maxpool\", model.maxpool),\n (\"layer1\", model.layer1),\n (\"layer2\", model.layer2),\n (\"layer3\", model.layer3),\n (\"layer4\", model.layer4[:-1]),\n ]))\n self.tuned_layers = nn.Sequential(OrderedDict([\n (\"layer4\", model.layer4[-1]),\n (\"avgpool\", model.avgpool),\n ]))\n elif transfer_type == \"partial-1\":\n # tune last layer\n self.frozen_layers = nn.Sequential(OrderedDict([\n (\"conv1\", model.conv1),\n (\"bn1\", model.bn1),\n (\"relu\", model.relu),\n (\"maxpool\", model.maxpool),\n (\"layer1\", model.layer1),\n (\"layer2\", model.layer2),\n (\"layer3\", model.layer3),\n ]))\n self.tuned_layers = nn.Sequential(OrderedDict([\n (\"layer4\", model.layer4),\n (\"avgpool\", model.avgpool),\n ]))\n\n elif transfer_type == \"partial-2\":\n self.frozen_layers = nn.Sequential(OrderedDict([\n (\"conv1\", model.conv1),\n (\"bn1\", model.bn1),\n (\"relu\", model.relu),\n (\"maxpool\", model.maxpool),\n (\"layer1\", model.layer1),\n (\"layer2\", model.layer2),\n ]))\n self.tuned_layers = nn.Sequential(OrderedDict([\n (\"layer3\", model.layer3),\n (\"layer4\", model.layer4),\n (\"avgpool\", model.avgpool),\n ]))\n\n elif transfer_type == \"partial-3\":\n self.frozen_layers = 
nn.Sequential(OrderedDict([\n (\"conv1\", model.conv1),\n (\"bn1\", model.bn1),\n (\"relu\", model.relu),\n (\"maxpool\", model.maxpool),\n (\"layer1\", model.layer1),\n ]))\n self.tuned_layers = nn.Sequential(OrderedDict([\n (\"layer2\", model.layer2),\n (\"layer3\", model.layer3),\n (\"layer4\", model.layer4),\n (\"avgpool\", model.avgpool),\n ]))\n\n elif transfer_type == \"linear\" or transfer_type == \"side\" or transfer_type == \"tinytl-bias\":\n self.frozen_layers = nn.Sequential(OrderedDict([\n (\"conv1\", model.conv1),\n (\"bn1\", model.bn1),\n (\"relu\", model.relu),\n (\"maxpool\", model.maxpool),\n (\"layer1\", model.layer1),\n (\"layer2\", model.layer2),\n (\"layer3\", model.layer3),\n (\"layer4\", model.layer4),\n (\"avgpool\", model.avgpool),\n ]))\n self.tuned_layers = nn.Identity()\n\n elif transfer_type == \"end2end\":\n self.frozen_layers = nn.Identity()\n self.tuned_layers = nn.Sequential(OrderedDict([\n (\"conv1\", model.conv1),\n (\"bn1\", model.bn1),\n (\"relu\", model.relu),\n (\"maxpool\", model.maxpool),\n (\"layer1\", model.layer1),\n (\"layer2\", model.layer2),\n (\"layer3\", model.layer3),\n (\"layer4\", model.layer4),\n (\"avgpool\", model.avgpool),\n ]))\n\n elif transfer_type == \"prompt\" and self.cfg.MODEL.PROMPT.LOCATION == \"pad\": # noqa\n self.frozen_layers = nn.Sequential(OrderedDict([\n (\"conv1\", model.conv1),\n (\"bn1\", model.bn1),\n (\"relu\", model.relu),\n (\"maxpool\", model.maxpool),\n (\"layer1\", model.layer1),\n (\"layer2\", model.layer2),\n (\"layer3\", model.layer3),\n (\"layer4\", model.layer4),\n (\"avgpool\", model.avgpool),\n ]))\n self.tuned_layers = nn.Identity()\n\n if transfer_type == \"tinytl-bias\":\n for k, p in self.frozen_layers.named_parameters():\n if 'bias' not in k:\n p.requires_grad = False\n else:\n for k, p in self.frozen_layers.named_parameters():\n p.requires_grad = False\n self.transfer_type = transfer_type\n\n def setup_prompt(self, prompt_config, model):\n # ONLY support below and pad\n self.prompt_location = prompt_config.LOCATION\n self.num_tokens = prompt_config.NUM_TOKENS\n if prompt_config.LOCATION == \"below\":\n return self._setup_prompt_below(prompt_config, model)\n elif prompt_config.LOCATION == \"pad\":\n return self._setup_prompt_pad(prompt_config, model)\n else:\n raise ValueError(\n \"ResNet models cannot use prompt location {}\".format(\n prompt_config.LOCATION))\n\n def _setup_prompt_below(self, prompt_config, model):\n if prompt_config.INITIATION == \"random\":\n self.prompt_embeddings = nn.Parameter(torch.zeros(\n 1, self.num_tokens,\n self.cfg.DATA.CROPSIZE, self.cfg.DATA.CROPSIZE\n ))\n nn.init.uniform_(self.prompt_embeddings.data, 0.0, 1.0)\n self.prompt_norm = tv.transforms.Normalize(\n mean=[sum([0.485, 0.456, 0.406])/3] * self.num_tokens,\n std=[sum([0.229, 0.224, 0.225])/3] * self.num_tokens,\n )\n\n elif prompt_config.INITIATION == \"gaussian\":\n self.prompt_embeddings = nn.Parameter(torch.zeros(\n 1, self.num_tokens,\n self.cfg.DATA.CROPSIZE, self.cfg.DATA.CROPSIZE\n ))\n\n nn.init.normal_(self.prompt_embeddings.data)\n\n self.prompt_norm = nn.Identity()\n\n else:\n raise ValueError(\"Other initiation scheme is not supported\")\n\n # modify first conv layer\n old_weight = model.conv1.weight # [64, 3, 7, 7]\n model.conv1 = nn.Conv2d(\n self.num_tokens+3, 64, kernel_size=7,\n stride=2, padding=3, bias=False\n )\n torch.nn.init.xavier_uniform(model.conv1.weight)\n\n model.conv1.weight[:, :3, :, :].data.copy_(old_weight)\n return model\n\n def _setup_prompt_pad(self, prompt_config, 
model):\n if prompt_config.INITIATION == \"random\":\n self.prompt_embeddings_tb = nn.Parameter(torch.zeros(\n 1, 3, 2 * self.num_tokens,\n self.cfg.DATA.CROPSIZE + 2 * self.num_tokens\n ))\n self.prompt_embeddings_lr = nn.Parameter(torch.zeros(\n 1, 3, self.cfg.DATA.CROPSIZE, 2 * self.num_tokens\n ))\n\n nn.init.uniform_(self.prompt_embeddings_tb.data, 0.0, 1.0)\n nn.init.uniform_(self.prompt_embeddings_lr.data, 0.0, 1.0)\n\n self.prompt_norm = tv.transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225],\n )\n\n elif prompt_config.INITIATION == \"gaussian\":\n self.prompt_embeddings_tb = nn.Parameter(torch.zeros(\n 1, 3, 2 * self.num_tokens,\n self.cfg.DATA.CROPSIZE + 2 * self.num_tokens\n ))\n self.prompt_embeddings_lr = nn.Parameter(torch.zeros(\n 1, 3, self.cfg.DATA.CROPSIZE, 2 * self.num_tokens\n ))\n\n nn.init.normal_(self.prompt_embeddings_tb.data)\n nn.init.normal_(self.prompt_embeddings_lr.data)\n\n self.prompt_norm = nn.Identity()\n else:\n raise ValueError(\"Other initiation scheme is not supported\")\n return model\n\n def get_pretrained_model(self, model_type):\n model_root = self.cfg.MODEL.MODEL_ROOT\n\n if model_type == \"imagenet_sup_rn50\":\n model = models.resnet50(pretrained=True)\n elif model_type == \"imagenet_sup_rn101\":\n model = models.resnet101(pretrained=True) # 2048\n elif model_type == \"imagenet_sup_rn152\":\n model = models.resnet152(pretrained=True) # 2048\n elif model_type == \"imagenet_sup_rn34\":\n model = models.resnet34(pretrained=True) # 512\n elif model_type == \"imagenet_sup_rn18\":\n model = models.resnet18(pretrained=True) # 512\n\n elif model_type == \"inat2021_sup_rn50\":\n checkpoint = torch.load(\n f\"{model_root}/inat2021_supervised_large.pth.tar\",\n map_location=torch.device('cpu')\n )\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Linear(model.fc.in_features, 10000)\n model.load_state_dict(checkpoint['state_dict'], strict=True)\n elif model_type == 'inat2021_mini_sup_rn50':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Linear(model.fc.in_features, 10000)\n checkpoint = torch.load(\n f\"{model_root}/inat2021_supervised_mini.pth.tar\",\n map_location=torch.device('cpu')\n )\n model.load_state_dict(checkpoint['state_dict'], strict=True)\n\n elif model_type == 'inat2021_mini_moco_v2_rn50':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Identity()\n checkpoint = torch.load(\n f\"{model_root}/inat2021_moco_v2_mini_1000_ep.pth.tar\",\n map_location=\"cpu\")\n\n # rename moco pre-trained keys\n state_dict = checkpoint['state_dict']\n for k in list(state_dict.keys()):\n # retain only encoder_q up to before the embedding layer\n if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):\n # remove prefix\n state_dict[k[len(\"module.encoder_q.\"):]] = state_dict[k]\n # delete renamed or unused k\n del state_dict[k]\n\n model.load_state_dict(state_dict, strict=True)\n\n elif model_type == 'imagenet_moco_v2_rn50':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Identity()\n checkpoint = torch.load(\n f\"{model_root}/imagenet_moco_v2_800ep_pretrain.pth.tar\",\n map_location=\"cpu\")\n\n # rename moco pre-trained keys\n state_dict = checkpoint['state_dict']\n for k in list(state_dict.keys()):\n # retain only encoder_q up to before the embedding layer\n if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):\n # remove prefix\n state_dict[k[len(\"module.encoder_q.\"):]] = state_dict[k]\n # delete renamed or 
unused k\n del state_dict[k]\n\n model.load_state_dict(state_dict, strict=True)\n\n elif model_type.startswith(\"mocov3_rn50\"):\n moco_epoch = model_type.split(\"ep\")[-1]\n checkpoint = torch.load(\n f\"{model_root}/mocov3_linear-1000ep.pth.tar\",\n map_location=\"cpu\")\n state_dict = checkpoint['state_dict']\n for k in list(state_dict.keys()):\n # retain only base_encoder up to before the embedding layer\n if k.startswith('module.'):\n # remove prefix\n state_dict[k[len(\"module.\"):]] = state_dict[k]\n # delete renamed or unused k\n del state_dict[k]\n model = models.resnet50()\n model.load_state_dict(state_dict, strict=False)\n\n else:\n raise ValueError(\"model type not supported for resnet backbone\")\n\n model.fc = nn.Identity()\n return model\n\n def get_outputdim(self):\n if self.cfg.DATA.FEATURE == \"imagenet_sup_rn34\" or self.cfg.DATA.FEATURE == \"imagenet_sup_rn18\":\n out_dim = 512\n else:\n out_dim = 2048\n return out_dim\n\n def setup_head(self, cfg):\n out_dim = self.get_outputdim()\n self.head = MLP(\n input_dim=out_dim,\n mlp_dims=[out_dim] * self.cfg.MODEL.MLP_NUM + \\\n [cfg.DATA.NUMBER_CLASSES],\n special_bias=True\n )\n\n def incorporate_prompt(self, x):\n B = x.shape[0]\n if self.prompt_location == \"below\":\n x = torch.cat((\n x,\n self.prompt_norm(\n self.prompt_embeddings).expand(B, -1, -1, -1),\n ), dim=1)\n # (B, 3 + num_prompts, crop_size, crop_size)\n\n elif self.prompt_location == \"pad\":\n prompt_emb_lr = self.prompt_norm(\n self.prompt_embeddings_lr).expand(B, -1, -1, -1)\n prompt_emb_tb = self.prompt_norm(\n self.prompt_embeddings_tb).expand(B, -1, -1, -1)\n\n x = torch.cat((\n prompt_emb_lr[:, :, :, :self.num_tokens],\n x, prompt_emb_lr[:, :, :, self.num_tokens:]\n ), dim=-1)\n x = torch.cat((\n prompt_emb_tb[:, :, :self.num_tokens, :],\n x, prompt_emb_tb[:, :, self.num_tokens:, :]\n ), dim=-2)\n # (B, 3, crop_size + num_prompts, crop_size + num_prompts)\n else:\n raise ValueError(\"not supported yet\")\n x = self.prompt_layers(x)\n return x\n\n def forward(self, x, return_feature=False):\n if self.side is not None:\n side_output = self.side(x)\n side_output = side_output.view(side_output.size(0), -1)\n side_output = self.side_projection(side_output)\n\n x = self.get_features(x)\n\n if self.side is not None:\n alpha_squashed = torch.sigmoid(self.side_alpha)\n x = alpha_squashed * x + (1 - alpha_squashed) * side_output\n\n if return_feature:\n return x\n\n return self.head(x)\n\n def get_features(self, x):\n \"\"\"get a (batch_size, 2048) feature\"\"\"\n if self.frozen_layers.training:\n self.frozen_layers.eval()\n\n if \"prompt\" not in self.transfer_type:\n with torch.set_grad_enabled(self.frozen_layers.training):\n x = self.frozen_layers(x)\n else:\n # prompt tuning required frozen_layers saving grad\n x = self.incorporate_prompt(x)\n x = self.frozen_layers(x)\n\n x = self.tuned_layers(x) # batch_size x 2048 x 1\n x = x.view(x.size(0), -1)\n\n return x" }, { "identifier": "ConvNeXt", "path": "vpt_main/src/models/convnext.py", "snippet": "class ConvNeXt(ResNet):\n \"\"\"\n ConvNeXt model,\n utilizing the ResNet class for structure and prompt setup\n \"\"\"\n\n def __init__(self, cfg):\n if cfg.DATA.FEATURE not in [\n \"imagenet_sup_rnx_tiny\",\n \"imagenet_sup_rnx_small\",\n \"imagenet_sup_rnx_base\",\n \"imagenet22k_sup_rnx_base\",\n \"imagenet22k_sup_rnx_large\",\n \"imagenet22k_sup_rnx_xlarge\",\n ]:\n raise ValueError(\"feature does not support ConvNeXt models\")\n if cfg.MODEL.PROMPT.LOCATION == \"below\":\n raise ValueError(\"Not support 
prompt-below at the moment\")\n super(ConvNeXt, self).__init__(cfg)\n\n def get_outputdim(self):\n backbone_arch = self.cfg.DATA.FEATURE.split(\"_\")[-1]\n return FEAT2DIM[backbone_arch]\n\n def setup_grad(self, model):\n # TODO: change the name of layers\n \"\"\"\n downsample_layers[0], stages[0]\n downsample_layers[1], stages[1]\n downsample_layers[2], stages[2]\n downsample_layers[3], stages[3]\n norm\n \"\"\"\n self.norm = model.norm\n transfer_type = self.cfg.MODEL.TRANSFER_TYPE\n # split enc into 3 parts:\n # prompt_layers frozen_layers tuned_layers\n # partial-0 identity all but last block\n # stages[-1][-1], stages[-1][-1] + norm\n # linear identity all identity\n # end2end identity identity all\n # prompt-pad identity all\n\n # partial, linear, end2end, prompt-pad\n self.prompt_layers = nn.Identity()\n\n if transfer_type == \"partial-0\":\n # last block to tune\n self.frozen_layers = nn.Sequential(OrderedDict([\n (\"downsample_layer1\", model.downsample_layers[0]),\n (\"stage1\", model.stages[0]),\n (\"downsample_layer2\", model.downsample_layers[1]),\n (\"stage2\", model.stages[1]),\n (\"downsample_layer3\", model.downsample_layers[2]),\n (\"stage3\", model.stages[2]),\n (\"downsample_layer4\", model.downsample_layers[3]),\n (\"stage4\", model.stages[3][:-1]),\n ]))\n self.tuned_layers = nn.Sequential(OrderedDict([\n (\"stage4\", model.stages[3][-1]),\n ]))\n self.tune_norm = True\n\n elif transfer_type == \"linear\" or transfer_type == \"side\" or transfer_type == \"tinytl-bias\": # noqa\n self.frozen_layers = nn.Sequential(OrderedDict([\n (\"downsample_layer1\", model.downsample_layers[0]),\n (\"stage1\", model.stages[0]),\n (\"downsample_layer2\", model.downsample_layers[1]),\n (\"stage2\", model.stages[1]),\n (\"downsample_layer3\", model.downsample_layers[2]),\n (\"stage3\", model.stages[2]),\n (\"downsample_layer4\", model.downsample_layers[3]),\n (\"stage4\", model.stages[3]),\n ]))\n self.tuned_layers = nn.Identity()\n self.tune_norm = False\n\n elif transfer_type == \"end2end\":\n self.frozen_layers = nn.Identity()\n self.tuned_layers = nn.Sequential(OrderedDict([\n (\"downsample_layer1\", model.downsample_layers[0]),\n (\"stage1\", model.stages[0]),\n (\"downsample_layer2\", model.downsample_layers[1]),\n (\"stage2\", model.stages[1]),\n (\"downsample_layer3\", model.downsample_layers[2]),\n (\"stage3\", model.stages[2]),\n (\"downsample_layer4\", model.downsample_layers[3]),\n (\"stage4\", model.stages[3]),\n ]))\n self.tune_norm = True\n\n elif transfer_type == \"prompt\" and self.cfg.MODEL.PROMPT.LOCATION == \"pad\": # noqa\n self.frozen_layers = nn.Sequential(OrderedDict([\n (\"downsample_layer1\", model.downsample_layers[0]),\n (\"stage1\", model.stages[0]),\n (\"downsample_layer2\", model.downsample_layers[1]),\n (\"stage2\", model.stages[1]),\n (\"downsample_layer3\", model.downsample_layers[2]),\n (\"stage3\", model.stages[2]),\n (\"downsample_layer4\", model.downsample_layers[3]),\n (\"stage4\", model.stages[3]),\n ]))\n self.tuned_layers = nn.Identity()\n self.tune_norm = False\n\n if transfer_type == \"tinytl-bias\":\n for k, p in self.frozen_layers.named_parameters():\n if 'bias' not in k:\n p.requires_grad = False\n else:\n for k, p in self.frozen_layers.named_parameters():\n p.requires_grad = False\n\n if not self.tune_norm:\n for k, p in self.norm.named_parameters():\n p.requires_grad = False\n self.transfer_type = transfer_type\n\n def _setup_prompt_below(self, prompt_config, model):\n # TODO:\n # the only difference btw this function and that of the 
ResNet class is the name of the first layer\n if prompt_config.INITIATION == \"random\":\n self.prompt_embeddings = nn.Parameter(torch.zeros(\n 1, self.num_tokens,\n self.cfg.DATA.CROPSIZE, self.cfg.DATA.CROPSIZE\n ))\n nn.init.uniform_(self.prompt_embeddings.data, 0.0, 1.0)\n self.prompt_norm = tv.transforms.Normalize(\n mean=[sum([0.485, 0.456, 0.406])/3] * self.num_tokens,\n std=[sum([0.229, 0.224, 0.225])/3] * self.num_tokens,\n )\n\n elif prompt_config.INITIATION == \"gaussian\":\n self.prompt_embeddings = nn.Parameter(torch.zeros(\n 1, self.num_tokens,\n self.cfg.DATA.CROPSIZE, self.cfg.DATA.CROPSIZE\n ))\n\n nn.init.normal_(self.prompt_embeddings.data)\n self.prompt_norm = nn.Identity()\n\n else:\n raise ValueError(\"Other initiation scheme is not supported\")\n\n # modify first conv layer\n old_weight = model.downsample_layers[0][0].weight # [*, 3, 4, 4]\n model.downsample_layers[0][0] = nn.Conv2d(\n self.num_tokens+3, old_weight.shape[0], kernel_size=4, stride=4\n )\n trunc_normal_(model.downsample_layers[0][0].weight, std=.02)\n torch.nn.init.constant_(model.downsample_layers[0][0].bias, 0)\n\n model.downsample_layers[0][0].weight[:, :3, :, :].data.copy_(old_weight)\n return model\n\n def get_pretrained_model(self, model_type):\n backbone_arch = model_type.split(\"_\")[-1]\n is_22k = \"22k\" in model_type\n if is_22k:\n # need to specify num_classes, o.w. throw error of weight size mismatch\n num_classes = 21841\n else:\n num_classes = 1000\n\n if backbone_arch == \"tiny\":\n model = convnext_tiny(pretrained=True)\n elif backbone_arch == \"small\":\n model = convnext_small(pretrained=True)\n elif backbone_arch == \"base\":\n model = convnext_base(\n pretrained=True, in_22k=is_22k, num_classes=num_classes)\n elif backbone_arch == \"large\":\n model = convnext_large(\n pretrained=True, in_22k=is_22k, num_classes=num_classes)\n elif backbone_arch == \"xlarge\":\n model = convnext_xlarge(\n pretrained=True, in_22k=is_22k, num_classes=num_classes)\n else:\n raise ValueError(\"model type not supported for resnet backbone\")\n\n model.head = nn.Identity()\n return model\n\n def get_features(self, x):\n \"\"\"get a (batch_size, feat_dim) feature\"\"\"\n if self.frozen_layers.training:\n self.frozen_layers.eval()\n\n if \"prompt\" not in self.transfer_type:\n with torch.set_grad_enabled(self.frozen_layers.training):\n x = self.frozen_layers(x)\n else:\n # prompt tuning required frozen_layers saving grad\n x = self.incorporate_prompt(x)\n x = self.frozen_layers(x)\n\n x = self.tuned_layers(x) # batch_size x 2048 x h x w\n x = self.norm(x.mean([-2, -1])) # global average pooling, (N, C, H, W) -> (N, C)\n return x" }, { "identifier": "ViT", "path": "vpt_main/src/models/vit_models.py", "snippet": "class ViT(nn.Module):\n \"\"\"ViT-related model.\"\"\"\n\n def __init__(self, cfg, load_pretrain=True, vis=False):\n super(ViT, self).__init__()\n\n if \"prompt\" in cfg.MODEL.TRANSFER_TYPE:\n prompt_cfg = cfg.MODEL.PROMPT\n else:\n prompt_cfg = None\n\n if cfg.MODEL.TRANSFER_TYPE != \"end2end\" and \"prompt\" not in cfg.MODEL.TRANSFER_TYPE:\n # linear, cls, tiny-tl, parital, adapter\n self.froze_enc = True\n else:\n # prompt, end2end, cls+prompt\n self.froze_enc = False\n \n if cfg.MODEL.TRANSFER_TYPE == \"adapter\":\n adapter_cfg = cfg.MODEL.ADAPTER\n else:\n adapter_cfg = None\n\n self.build_backbone(\n prompt_cfg, cfg, adapter_cfg, load_pretrain, vis=vis)\n self.cfg = cfg\n self.setup_side()\n self.setup_head(cfg)\n\n def setup_side(self):\n if self.cfg.MODEL.TRANSFER_TYPE != \"side\":\n 
self.side = None\n else:\n self.side_alpha = nn.Parameter(torch.tensor(0.0))\n m = models.alexnet(pretrained=True)\n self.side = nn.Sequential(OrderedDict([\n (\"features\", m.features),\n (\"avgpool\", m.avgpool),\n ]))\n self.side_projection = nn.Linear(9216, self.feat_dim, bias=False)\n\n def build_backbone(self, prompt_cfg, cfg, adapter_cfg, load_pretrain, vis):\n transfer_type = cfg.MODEL.TRANSFER_TYPE\n self.enc, self.feat_dim = build_vit_sup_models(\n cfg.DATA.FEATURE, cfg.DATA.CROPSIZE, prompt_cfg, cfg.MODEL.MODEL_ROOT, adapter_cfg, load_pretrain, vis\n )\n\n # linear, prompt, cls, cls+prompt, partial_1\n if transfer_type == \"partial-1\":\n total_layer = len(self.enc.transformer.encoder.layer)\n # tuned_params = [\n # \"transformer.encoder.layer.{}\".format(i-1) for i in range(total_layer)]\n for k, p in self.enc.named_parameters():\n if \"transformer.encoder.layer.{}\".format(total_layer - 1) not in k and \"transformer.encoder.encoder_norm\" not in k: # noqa\n p.requires_grad = False\n elif transfer_type == \"partial-2\":\n total_layer = len(self.enc.transformer.encoder.layer)\n for k, p in self.enc.named_parameters():\n if \"transformer.encoder.layer.{}\".format(total_layer - 1) not in k and \"transformer.encoder.layer.{}\".format(total_layer - 2) not in k and \"transformer.encoder.encoder_norm\" not in k: # noqa\n p.requires_grad = False\n\n elif transfer_type == \"partial-4\":\n total_layer = len(self.enc.transformer.encoder.layer)\n for k, p in self.enc.named_parameters():\n if \"transformer.encoder.layer.{}\".format(total_layer - 1) not in k and \"transformer.encoder.layer.{}\".format(total_layer - 2) not in k and \"transformer.encoder.layer.{}\".format(total_layer - 3) not in k and \"transformer.encoder.layer.{}\".format(total_layer - 4) not in k and \"transformer.encoder.encoder_norm\" not in k: # noqa\n p.requires_grad = False\n\n elif transfer_type == \"linear\" or transfer_type == \"side\":\n for k, p in self.enc.named_parameters():\n p.requires_grad = False\n\n elif transfer_type == \"tinytl-bias\":\n for k, p in self.enc.named_parameters():\n if 'bias' not in k:\n p.requires_grad = False\n\n elif transfer_type == \"prompt\" and prompt_cfg.LOCATION == \"below\":\n for k, p in self.enc.named_parameters():\n if \"prompt\" not in k and \"embeddings.patch_embeddings.weight\" not in k and \"embeddings.patch_embeddings.bias\" not in k:\n p.requires_grad = False\n\n elif transfer_type == \"prompt\":\n for k, p in self.enc.named_parameters():\n if \"prompt\" not in k:\n p.requires_grad = False\n\n elif transfer_type == \"prompt+bias\":\n for k, p in self.enc.named_parameters():\n if \"prompt\" not in k and 'bias' not in k:\n p.requires_grad = False\n\n elif transfer_type == \"prompt-noupdate\":\n for k, p in self.enc.named_parameters():\n p.requires_grad = False\n\n elif transfer_type == \"cls\":\n for k, p in self.enc.named_parameters():\n if \"cls_token\" not in k:\n p.requires_grad = False\n\n elif transfer_type == \"cls-reinit\":\n nn.init.normal_(\n self.enc.transformer.embeddings.cls_token,\n std=1e-6\n )\n\n for k, p in self.enc.named_parameters():\n if \"cls_token\" not in k:\n p.requires_grad = False\n\n elif transfer_type == \"cls+prompt\":\n for k, p in self.enc.named_parameters():\n if \"prompt\" not in k and \"cls_token\" not in k:\n p.requires_grad = False\n\n elif transfer_type == \"cls-reinit+prompt\":\n nn.init.normal_(\n self.enc.transformer.embeddings.cls_token,\n std=1e-6\n )\n for k, p in self.enc.named_parameters():\n if \"prompt\" not in k and 
\"cls_token\" not in k:\n p.requires_grad = False\n \n # adapter\n elif transfer_type == \"adapter\":\n for k, p in self.enc.named_parameters():\n if \"adapter\" not in k:\n p.requires_grad = False\n\n elif transfer_type == \"end2end\":\n logger.info(\"Enable all parameters update during training\")\n\n else:\n raise ValueError(\"transfer type {} is not supported\".format(\n transfer_type))\n\n def setup_head(self, cfg):\n self.head = MLP(\n input_dim=self.feat_dim,\n mlp_dims=[self.feat_dim] * self.cfg.MODEL.MLP_NUM + \\\n [cfg.DATA.NUMBER_CLASSES], # noqa\n special_bias=True\n )\n\n def forward(self, x, return_feature=False):\n if self.side is not None:\n side_output = self.side(x)\n side_output = side_output.view(side_output.size(0), -1)\n side_output = self.side_projection(side_output)\n\n if self.froze_enc and self.enc.training:\n self.enc.eval()\n x = self.enc(x) # batch_size x self.feat_dim\n\n if self.side is not None:\n alpha_squashed = torch.sigmoid(self.side_alpha)\n x = alpha_squashed * x + (1 - alpha_squashed) * side_output\n\n if return_feature:\n return x, x\n x = self.head(x)\n\n return x\n \n def forward_cls_layerwise(self, x):\n cls_embeds = self.enc.forward_cls_layerwise(x)\n return cls_embeds\n\n def get_features(self, x):\n \"\"\"get a (batch_size, self.feat_dim) feature\"\"\"\n x = self.enc(x) # batch_size x self.feat_dim\n return x" }, { "identifier": "Swin", "path": "vpt_main/src/models/vit_models.py", "snippet": "class Swin(ViT):\n \"\"\"Swin-related model.\"\"\"\n\n def __init__(self, cfg):\n super(Swin, self).__init__(cfg)\n\n def build_backbone(self, prompt_cfg, cfg, adapter_cfg, load_pretrain, vis):\n transfer_type = cfg.MODEL.TRANSFER_TYPE\n self.enc, self.feat_dim = build_swin_model(\n cfg.DATA.FEATURE, cfg.DATA.CROPSIZE,\n prompt_cfg, cfg.MODEL.MODEL_ROOT\n )\n\n # linear, prompt, cls, cls+prompt, partial_1\n if transfer_type == \"partial-1\":\n total_layer = len(self.enc.layers)\n total_blocks = len(self.enc.layers[-1].blocks)\n for k, p in self.enc.named_parameters():\n if \"layers.{}.blocks.{}\".format(total_layer - 1, total_blocks - 1) not in k and \"norm.weight\" != k and \"norm.bias\" != k: # noqa\n p.requires_grad = False\n\n elif transfer_type == \"partial-2\":\n total_layer = len(self.enc.layers)\n for k, p in self.enc.named_parameters():\n if \"layers.{}\".format(total_layer - 1) not in k and \"norm.weight\" != k and \"norm.bias\" != k: # noqa\n p.requires_grad = False\n\n elif transfer_type == \"partial-4\":\n total_layer = len(self.enc.layers)\n total_blocks = len(self.enc.layers[-2].blocks)\n\n for k, p in self.enc.named_parameters():\n if \"layers.{}\".format(total_layer - 1) not in k and \"layers.{}.blocks.{}\".format(total_layer - 2, total_blocks - 1) not in k and \"layers.{}.blocks.{}\".format(total_layer - 2, total_blocks - 2) not in k and \"layers.{}.downsample\".format(total_layer - 2) not in k and \"norm.weight\" != k and \"norm.bias\" != k: # noqa\n p.requires_grad = False\n\n elif transfer_type == \"linear\" or transfer_type == \"side\":\n for k, p in self.enc.named_parameters():\n p.requires_grad = False\n\n elif transfer_type == \"tinytl-bias\":\n for k, p in self.enc.named_parameters():\n if 'bias' not in k:\n p.requires_grad = False\n\n elif transfer_type == \"prompt\" and prompt_cfg.LOCATION in [\"below\"]:\n for k, p in self.enc.named_parameters():\n if \"prompt\" not in k and \"patch_embed\" not in k:\n p.requires_grad = False\n\n elif transfer_type == \"prompt\":\n for k, p in self.enc.named_parameters():\n if \"prompt\" not in 
k:\n p.requires_grad = False\n\n elif transfer_type == \"prompt+bias\":\n for k, p in self.enc.named_parameters():\n if \"prompt\" not in k and 'bias' not in k:\n p.requires_grad = False\n\n elif transfer_type == \"end2end\":\n logger.info(\"Enable all parameters update during training\")\n\n else:\n raise ValueError(\"transfer type {} is not supported\".format(\n transfer_type))" }, { "identifier": "SSLViT", "path": "vpt_main/src/models/vit_models.py", "snippet": "class SSLViT(ViT):\n \"\"\"moco-v3 and mae model.\"\"\"\n\n def __init__(self, cfg):\n super(SSLViT, self).__init__(cfg)\n\n def build_backbone(self, prompt_cfg, cfg, adapter_cfg, load_pretrain, vis):\n if \"moco\" in cfg.DATA.FEATURE:\n build_fn = build_mocov3_model\n elif \"mae\" in cfg.DATA.FEATURE:\n build_fn = build_mae_model\n\n self.enc, self.feat_dim = build_fn(\n cfg.DATA.FEATURE, cfg.DATA.CROPSIZE,\n prompt_cfg, cfg.MODEL.MODEL_ROOT, adapter_cfg=adapter_cfg\n )\n\n transfer_type = cfg.MODEL.TRANSFER_TYPE\n # linear, prompt, cls, cls+prompt, partial_1\n if transfer_type == \"partial-1\":\n total_layer = len(self.enc.blocks)\n for k, p in self.enc.named_parameters():\n if \"blocks.{}\".format(total_layer - 1) not in k and \"fc_norm\" not in k and k != \"norm\": # noqa\n p.requires_grad = False\n elif transfer_type == \"partial-2\":\n total_layer = len(self.enc.blocks)\n for k, p in self.enc.named_parameters():\n if \"blocks.{}\".format(total_layer - 1) not in k and \"blocks.{}\".format(total_layer - 2) not in k and \"fc_norm\" not in k and k != \"norm\": # noqa\n p.requires_grad = False\n\n elif transfer_type == \"partial-4\":\n total_layer = len(self.enc.blocks)\n for k, p in self.enc.named_parameters():\n if \"blocks.{}\".format(total_layer - 1) not in k and \"blocks.{}\".format(total_layer - 2) not in k and \"blocks.{}\".format(total_layer - 3) not in k and \"blocks.{}\".format(total_layer - 4) not in k and \"fc_norm\" not in k and k != \"norm\": # noqa\n p.requires_grad = False\n\n elif transfer_type == \"linear\" or transfer_type == \"sidetune\":\n for k, p in self.enc.named_parameters():\n p.requires_grad = False\n\n elif transfer_type == \"tinytl-bias\":\n for k, p in self.enc.named_parameters():\n if 'bias' not in k:\n p.requires_grad = False\n\n elif transfer_type == \"prompt+bias\":\n for k, p in self.enc.named_parameters():\n if \"prompt\" not in k and 'bias' not in k:\n p.requires_grad = False\n\n elif transfer_type == \"prompt\" and prompt_cfg.LOCATION == \"below\":\n for k, p in self.enc.named_parameters():\n if \"prompt\" not in k and \"patch_embed.proj.weight\" not in k and \"patch_embed.proj.bias\" not in k:\n p.requires_grad = False\n\n elif transfer_type == \"prompt\":\n for k, p in self.enc.named_parameters():\n if \"prompt\" not in k:\n p.requires_grad = False\n\n elif transfer_type == \"end2end\":\n logger.info(\"Enable all parameters update during training\")\n \n # adapter\n elif transfer_type == \"adapter\":\n for k, p in self.enc.named_parameters():\n if \"adapter\" not in k:\n p.requires_grad = False\n\n else:\n raise ValueError(\"transfer type {} is not supported\".format(\n transfer_type))" }, { "identifier": "logging", "path": "vpt_main/src/utils/logging.py", "snippet": "_FORMAT = \"[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s\"\ndef _suppress_print():\n def print_pass(*objects, sep=\" \", end=\"\\n\", file=sys.stdout, flush=False):\ndef _cached_log_stream(filename):\ndef setup_logging(\n num_gpu, num_shards, output=\"\", name=\"visual_prompt\", color=True):\ndef 
setup_single_logging(name, output=\"\"):\ndef get_logger(name):\ndef log_json_stats(stats, sort_keys=True):\n def __init__(self, *args, **kwargs):\n def formatMessage(self, record: logging.LogRecord) -> str:\nclass _ColorfulFormatter(logging.Formatter):" } ]
from tabnanny import verbose
from .resnet import ResNet
from .convnext import ConvNeXt
from .vit_models import ViT, Swin, SSLViT
from ..utils import logging
import torch
10,093
#!/usr/bin/env python3
"""
Model construction functions.
"""
logger = logging.get_logger("visual_prompt")
# Supported model types
_MODEL_TYPES = {
#!/usr/bin/env python3
"""
Model construction functions.
"""
logger = logging.get_logger("visual_prompt")
# Supported model types
_MODEL_TYPES = {
"resnet": ResNet,
0
2023-12-12 13:19:17+00:00
12k
KULL-Centre/_2023_Blaabjerg_SSEmb
src/models/msa_transformer/modules.py
[ { "identifier": "MultiheadAttention", "path": "src/models/msa_transformer/multihead_attention.py", "snippet": "class MultiheadAttention(nn.Module):\n \"\"\"Multi-headed attention.\n\n See \"Attention Is All You Need\" for more details.\n \"\"\"\n\n def __init__(\n self,\n embed_dim,\n num_heads,\n kdim=None,\n vdim=None,\n dropout=0.0,\n bias=True,\n add_bias_kv: bool = False,\n add_zero_attn: bool = False,\n self_attention: bool = False,\n encoder_decoder_attention: bool = False,\n use_rotary_embeddings: bool = False,\n ):\n super().__init__()\n self.embed_dim = embed_dim\n self.kdim = kdim if kdim is not None else embed_dim\n self.vdim = vdim if vdim is not None else embed_dim\n self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim\n\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n assert (\n self.head_dim * num_heads == self.embed_dim\n ), \"embed_dim must be divisible by num_heads\"\n self.scaling = self.head_dim**-0.5\n\n self.self_attention = self_attention\n self.encoder_decoder_attention = encoder_decoder_attention\n\n assert not self.self_attention or self.qkv_same_dim, (\n \"Self-attention requires query, key and \" \"value to be of the same size\"\n )\n\n self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias)\n self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)\n self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n\n self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n\n if add_bias_kv:\n self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))\n self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))\n else:\n self.bias_k = self.bias_v = None\n\n self.add_zero_attn = add_zero_attn\n\n self.reset_parameters()\n\n self.onnx_trace = False\n self.rot_emb = None\n if use_rotary_embeddings:\n self.rot_emb = RotaryEmbedding(dim=self.head_dim)\n\n self.enable_torch_version = False\n if hasattr(F, \"multi_head_attention_forward\"):\n self.enable_torch_version = True\n else:\n self.enable_torch_version = False\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n def reset_parameters(self):\n if self.qkv_same_dim:\n # Empirically observed the convergence to be much better with\n # the scaled initialization\n nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))\n else:\n nn.init.xavier_uniform_(self.k_proj.weight)\n nn.init.xavier_uniform_(self.v_proj.weight)\n nn.init.xavier_uniform_(self.q_proj.weight)\n\n nn.init.xavier_uniform_(self.out_proj.weight)\n if self.out_proj.bias is not None:\n nn.init.constant_(self.out_proj.bias, 0.0)\n if self.bias_k is not None:\n nn.init.xavier_normal_(self.bias_k)\n if self.bias_v is not None:\n nn.init.xavier_normal_(self.bias_v)\n\n def forward(\n self,\n query,\n key: Optional[Tensor],\n value: Optional[Tensor],\n key_padding_mask: Optional[Tensor] = None,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,\n need_weights: bool = True,\n static_kv: bool = False,\n attn_mask: Optional[Tensor] = None,\n before_softmax: bool = False,\n need_head_weights: bool = False,\n ) -> Tuple[Tensor, Optional[Tensor]]:\n \"\"\"Input shape: Time x Batch x Channel\n\n Args:\n key_padding_mask (ByteTensor, optional): mask to exclude\n keys that are pads, of shape `(batch, src_len)`, where\n padding elements are indicated by 1s.\n need_weights (bool, optional): return the attention weights,\n 
averaged over heads (default: False).\n attn_mask (ByteTensor, optional): typically used to\n implement causal attention, where the mask prevents the\n attention from looking forward in time (default: None).\n before_softmax (bool, optional): return the raw attention\n weights and values before the attention softmax.\n need_head_weights (bool, optional): return the attention\n weights for each head. Implies *need_weights*. Default:\n return the average attention weights over all heads.\n \"\"\"\n if need_head_weights:\n need_weights = True\n\n tgt_len, bsz, embed_dim = query.size()\n assert embed_dim == self.embed_dim\n assert list(query.size()) == [tgt_len, bsz, embed_dim]\n\n if (\n not self.rot_emb\n and self.enable_torch_version\n and not self.onnx_trace\n and incremental_state is None\n and not static_kv\n # A workaround for quantization to work. Otherwise JIT compilation\n # treats bias in linear module as method.\n and not torch.jit.is_scripting()\n and not need_head_weights\n ):\n assert key is not None and value is not None\n return F.multi_head_attention_forward(\n query,\n key,\n value,\n self.embed_dim,\n self.num_heads,\n torch.empty([0]),\n torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),\n self.bias_k,\n self.bias_v,\n self.add_zero_attn,\n self.dropout,\n self.out_proj.weight,\n self.out_proj.bias,\n self.training,\n key_padding_mask,\n need_weights,\n attn_mask,\n use_separate_proj_weight=True,\n q_proj_weight=self.q_proj.weight,\n k_proj_weight=self.k_proj.weight,\n v_proj_weight=self.v_proj.weight,\n )\n if incremental_state is not None:\n saved_state = self._get_input_buffer(incremental_state)\n if saved_state is not None and \"prev_key\" in saved_state:\n # previous time steps are cached - no need to recompute\n # key and value if they are static\n if static_kv:\n assert self.encoder_decoder_attention and not self.self_attention\n key = value = None\n else:\n saved_state = None\n\n if self.self_attention:\n q = self.q_proj(query)\n k = self.k_proj(query)\n v = self.v_proj(query)\n elif self.encoder_decoder_attention:\n # encoder-decoder attention\n q = self.q_proj(query)\n if key is None:\n assert value is None\n k = v = None\n else:\n k = self.k_proj(key)\n v = self.v_proj(key)\n\n else:\n assert key is not None and value is not None\n q = self.q_proj(query)\n k = self.k_proj(key)\n v = self.v_proj(value)\n q *= self.scaling\n\n if self.bias_k is not None:\n assert self.bias_v is not None\n k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])\n v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])\n if attn_mask is not None:\n attn_mask = torch.cat(\n [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1\n )\n if key_padding_mask is not None:\n key_padding_mask = torch.cat(\n [\n key_padding_mask,\n key_padding_mask.new_zeros(key_padding_mask.size(0), 1),\n ],\n dim=1,\n )\n\n q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n if k is not None:\n k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n if v is not None:\n v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n\n if saved_state is not None:\n # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)\n if \"prev_key\" in saved_state:\n _prev_key = saved_state[\"prev_key\"]\n assert _prev_key is not None\n prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)\n if static_kv:\n k = prev_key\n else:\n assert k is not None\n k = torch.cat([prev_key, k], dim=1)\n if 
\"prev_value\" in saved_state:\n _prev_value = saved_state[\"prev_value\"]\n assert _prev_value is not None\n prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)\n if static_kv:\n v = prev_value\n else:\n assert v is not None\n v = torch.cat([prev_value, v], dim=1)\n prev_key_padding_mask: Optional[Tensor] = None\n if \"prev_key_padding_mask\" in saved_state:\n prev_key_padding_mask = saved_state[\"prev_key_padding_mask\"]\n assert k is not None and v is not None\n key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(\n key_padding_mask=key_padding_mask,\n prev_key_padding_mask=prev_key_padding_mask,\n batch_size=bsz,\n src_len=k.size(1),\n static_kv=static_kv,\n )\n\n saved_state[\"prev_key\"] = k.view(bsz, self.num_heads, -1, self.head_dim)\n saved_state[\"prev_value\"] = v.view(bsz, self.num_heads, -1, self.head_dim)\n saved_state[\"prev_key_padding_mask\"] = key_padding_mask\n # In this branch incremental_state is never None\n assert incremental_state is not None\n incremental_state = self._set_input_buffer(incremental_state, saved_state)\n assert k is not None\n src_len = k.size(1)\n\n # This is part of a workaround to get around fork/join parallelism\n # not supporting Optional types.\n if key_padding_mask is not None and key_padding_mask.dim() == 0:\n key_padding_mask = None\n\n if key_padding_mask is not None:\n assert key_padding_mask.size(0) == bsz\n assert key_padding_mask.size(1) == src_len\n\n if self.add_zero_attn:\n assert v is not None\n src_len += 1\n k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)\n v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)\n if attn_mask is not None:\n attn_mask = torch.cat(\n [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1\n )\n if key_padding_mask is not None:\n key_padding_mask = torch.cat(\n [\n key_padding_mask,\n torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask),\n ],\n dim=1,\n )\n\n if self.rot_emb:\n q, k = self.rot_emb(q, k)\n\n attn_weights = torch.bmm(q, k.transpose(1, 2))\n attn_weights = MultiheadAttention.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)\n\n assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]\n\n if attn_mask is not None:\n attn_mask = attn_mask.unsqueeze(0)\n if self.onnx_trace:\n attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)\n attn_weights += attn_mask\n\n if key_padding_mask is not None:\n # don't attend to padding symbols\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights = attn_weights.masked_fill(\n key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float(\"-inf\")\n )\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n if before_softmax:\n return attn_weights, v\n\n attn_weights_float = utils_softmax(attn_weights, dim=-1, onnx_trace=self.onnx_trace)\n attn_weights = attn_weights_float.type_as(attn_weights)\n attn_probs = F.dropout(\n attn_weights_float.type_as(attn_weights),\n p=self.dropout,\n training=self.training,\n )\n assert v is not None\n attn = torch.bmm(attn_probs, v)\n assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]\n if self.onnx_trace and attn.size(1) == 1:\n # when ONNX tracing a single decoder step (sequence length == 1)\n # the transpose is a no-op copy before view, thus unnecessary\n attn = attn.contiguous().view(tgt_len, bsz, embed_dim)\n else:\n attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)\n attn = self.out_proj(attn)\n 
attn_weights: Optional[Tensor] = None\n if need_weights:\n attn_weights = attn_weights_float.view(\n bsz, self.num_heads, tgt_len, src_len\n ).type_as(attn).transpose(1, 0)\n if not need_head_weights:\n # average attention weights over heads\n attn_weights = attn_weights.mean(dim=0)\n\n return attn, attn_weights\n\n @staticmethod\n def _append_prev_key_padding_mask(\n key_padding_mask: Optional[Tensor],\n prev_key_padding_mask: Optional[Tensor],\n batch_size: int,\n src_len: int,\n static_kv: bool,\n ) -> Optional[Tensor]:\n # saved key padding masks have shape (bsz, seq_len)\n if prev_key_padding_mask is not None and static_kv:\n new_key_padding_mask = prev_key_padding_mask\n elif prev_key_padding_mask is not None and key_padding_mask is not None:\n new_key_padding_mask = torch.cat(\n [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1\n )\n # During incremental decoding, as the padding token enters and\n # leaves the frame, there will be a time when prev or current\n # is None\n elif prev_key_padding_mask is not None:\n filler = torch.zeros(\n (batch_size, src_len - prev_key_padding_mask.size(1)),\n device=prev_key_padding_mask.device,\n )\n new_key_padding_mask = torch.cat(\n [prev_key_padding_mask.float(), filler.float()], dim=1\n )\n elif key_padding_mask is not None:\n filler = torch.zeros(\n (batch_size, src_len - key_padding_mask.size(1)),\n device=key_padding_mask.device,\n )\n new_key_padding_mask = torch.cat([filler.float(), key_padding_mask.float()], dim=1)\n else:\n new_key_padding_mask = prev_key_padding_mask\n return new_key_padding_mask\n\n @torch.jit.export\n def reorder_incremental_state(\n self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor\n ):\n \"\"\"Reorder buffered internal state (for incremental generation).\"\"\"\n input_buffer = self._get_input_buffer(incremental_state)\n if input_buffer is not None:\n for k in input_buffer.keys():\n input_buffer_k = input_buffer[k]\n if input_buffer_k is not None:\n if self.encoder_decoder_attention and input_buffer_k.size(0) == new_order.size(\n 0\n ):\n break\n input_buffer[k] = input_buffer_k.index_select(0, new_order)\n incremental_state = self._set_input_buffer(incremental_state, input_buffer)\n return incremental_state\n\n def _get_input_buffer(\n self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]\n ) -> Dict[str, Optional[Tensor]]:\n result = self.get_incremental_state(incremental_state, \"attn_state\")\n if result is not None:\n return result\n else:\n empty_result: Dict[str, Optional[Tensor]] = {}\n return empty_result\n\n def _set_input_buffer(\n self,\n incremental_state: Dict[str, Dict[str, Optional[Tensor]]],\n buffer: Dict[str, Optional[Tensor]],\n ):\n return self.set_incremental_state(incremental_state, \"attn_state\", buffer)\n\n def apply_sparse_mask(attn_weights, tgt_len: int, src_len: int, bsz: int):\n return attn_weights\n\n def upgrade_state_dict_named(self, state_dict, name):\n prefix = name + \".\" if name != \"\" else \"\"\n items_to_add = {}\n keys_to_remove = []\n for k in state_dict.keys():\n if k.endswith(prefix + \"in_proj_weight\"):\n # in_proj_weight used to be q + k + v with same dimensions\n dim = int(state_dict[k].shape[0] / 3)\n items_to_add[prefix + \"q_proj.weight\"] = state_dict[k][:dim]\n items_to_add[prefix + \"k_proj.weight\"] = state_dict[k][dim : 2 * dim]\n items_to_add[prefix + \"v_proj.weight\"] = state_dict[k][2 * dim :]\n\n keys_to_remove.append(k)\n\n k_bias = prefix + \"in_proj_bias\"\n if k_bias in 
state_dict.keys():\n dim = int(state_dict[k].shape[0] / 3)\n items_to_add[prefix + \"q_proj.bias\"] = state_dict[k_bias][:dim]\n items_to_add[prefix + \"k_proj.bias\"] = state_dict[k_bias][dim : 2 * dim]\n items_to_add[prefix + \"v_proj.bias\"] = state_dict[k_bias][2 * dim :]\n\n keys_to_remove.append(prefix + \"in_proj_bias\")\n\n for k in keys_to_remove:\n del state_dict[k]\n\n for key, value in items_to_add.items():\n state_dict[key] = value" }, { "identifier": "ColumnSelfAttention", "path": "src/models/msa_transformer/axial_attention.py", "snippet": "class ColumnSelfAttention(nn.Module):\n \"\"\"Compute self-attention over columns of a 2D input.\"\"\"\n\n def __init__(\n self,\n embed_dim,\n num_heads,\n dropout=0.0,\n max_tokens_per_msa: int = 2 ** 16,\n ):\n super().__init__()\n\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n self.scaling = self.head_dim ** -0.5\n self.max_tokens_per_msa = max_tokens_per_msa\n\n self.k_proj = nn.Linear(embed_dim, embed_dim)\n self.v_proj = nn.Linear(embed_dim, embed_dim)\n self.q_proj = nn.Linear(embed_dim, embed_dim)\n\n self.out_proj = nn.Linear(embed_dim, embed_dim)\n self.dropout_module = nn.Dropout(dropout)\n\n def _batched_forward(\n self,\n x,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n ):\n num_rows, num_cols, batch_size, embed_dim = x.size()\n max_cols = max(1, self.max_tokens_per_msa // num_rows)\n outputs = []\n attns = []\n for start in range(0, num_cols, max_cols):\n output, attn = self(\n x[:, start : start + max_cols],\n self_attn_mask=self_attn_mask,\n self_attn_padding_mask=self_attn_padding_mask[:, :, start : start + max_cols]\n if self_attn_padding_mask is not None\n else None,\n )\n outputs.append(output)\n attns.append(attn)\n output = torch.cat(outputs, 1)\n attns = torch.cat(attns, 1)\n return output, attns\n\n def compute_attention_update(\n self,\n x,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n ):\n num_rows, num_cols, batch_size, embed_dim = x.size()\n if num_rows == 1:\n # if there is only 1 position, this is equivalent and doesn't break with padding\n attn_probs = torch.ones(\n self.num_heads,\n num_cols,\n batch_size,\n num_rows,\n num_rows,\n device=x.device,\n dtype=x.dtype,\n )\n output = self.out_proj(self.v_proj(x))\n else:\n q = self.q_proj(x).view(num_rows, num_cols, batch_size, self.num_heads, self.head_dim)\n k = self.k_proj(x).view(num_rows, num_cols, batch_size, self.num_heads, self.head_dim)\n v = self.v_proj(x).view(num_rows, num_cols, batch_size, self.num_heads, self.head_dim)\n q *= self.scaling\n\n attn_weights = torch.einsum(\"icnhd,jcnhd->hcnij\", q, k)\n\n if self_attn_mask is not None:\n raise NotImplementedError\n if self_attn_padding_mask is not None:\n attn_weights = attn_weights.masked_fill(\n self_attn_padding_mask.permute(2, 0, 1).unsqueeze(0).unsqueeze(3),\n -10000,\n )\n\n attn_probs = attn_weights.softmax(-1)\n attn_probs = self.dropout_module(attn_probs)\n context = torch.einsum(\"hcnij,jcnhd->icnhd\", attn_probs, v)\n context = context.contiguous().view(num_rows, num_cols, batch_size, embed_dim)\n output = self.out_proj(context)\n return output, attn_probs\n\n def forward(\n self,\n x,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n ):\n num_rows, num_cols, batch_size, embed_dim = x.size()\n # if False and num_rows * num_cols > 2 ** 14 and not torch.is_grad_enabled():\n if (num_rows * num_cols) > self.max_tokens_per_msa and not torch.is_grad_enabled():\n return self._batched_forward(\n x,\n self_attn_mask,\n 
self_attn_padding_mask,\n )\n else:\n return self.compute_attention_update(x, self_attn_mask, self_attn_padding_mask)" }, { "identifier": "RowSelfAttention", "path": "src/models/msa_transformer/axial_attention.py", "snippet": "class RowSelfAttention(nn.Module):\n \"\"\"Compute self-attention over rows of a 2D input.\"\"\"\n\n def __init__(\n self,\n embed_dim,\n num_heads,\n dropout=0.0,\n max_tokens_per_msa: int = 2 ** 16,\n ):\n super().__init__()\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n self.scaling = self.head_dim ** -0.5\n self.max_tokens_per_msa = max_tokens_per_msa\n self.attn_shape = \"hnij\"\n\n self.k_proj = nn.Linear(embed_dim, embed_dim)\n self.v_proj = nn.Linear(embed_dim, embed_dim)\n self.q_proj = nn.Linear(embed_dim, embed_dim)\n\n self.out_proj = nn.Linear(embed_dim, embed_dim)\n self.dropout_module = nn.Dropout(dropout)\n\n def align_scaling(self, q):\n num_rows = q.size(0)\n return self.scaling / math.sqrt(num_rows)\n\n def _batched_forward(\n self,\n x,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n ):\n num_rows, num_cols, batch_size, embed_dim = x.size()\n max_rows = max(1, self.max_tokens_per_msa // num_cols)\n attns = 0\n scaling = self.align_scaling(x)\n for start in range(0, num_rows, max_rows):\n attn_weights = self.compute_attention_weights(\n x[start : start + max_rows],\n scaling,\n self_attn_mask=self_attn_mask,\n self_attn_padding_mask=self_attn_padding_mask[:, start : start + max_rows]\n if self_attn_padding_mask is not None\n else None,\n )\n attns += attn_weights\n attn_probs = attns.softmax(-1)\n attn_probs = self.dropout_module(attn_probs)\n\n outputs = []\n for start in range(0, num_rows, max_rows):\n output = self.compute_attention_update(x[start : start + max_rows], attn_probs)\n outputs.append(output)\n\n output = torch.cat(outputs, 0)\n return output, attn_probs\n\n def compute_attention_weights(\n self,\n x,\n scaling: float,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n ):\n num_rows, num_cols, batch_size, embed_dim = x.size()\n q = self.q_proj(x).view(num_rows, num_cols, batch_size, self.num_heads, self.head_dim)\n k = self.k_proj(x).view(num_rows, num_cols, batch_size, self.num_heads, self.head_dim)\n q *= scaling\n if self_attn_padding_mask is not None:\n # Zero out any padded aligned positions - this is important since\n # we take a sum across the alignment axis.\n q *= 1 - self_attn_padding_mask.permute(1, 2, 0).unsqueeze(3).unsqueeze(4).to(q)\n\n attn_weights = torch.einsum(f\"rinhd,rjnhd->{self.attn_shape}\", q, k)\n\n if self_attn_mask is not None:\n attn_weights = attn_weights.masked_fill(\n self_attn_mask.unsqueeze(0).unsqueeze(0),\n -10000,\n )\n\n if self_attn_padding_mask is not None:\n attn_weights = attn_weights.masked_fill(\n self_attn_padding_mask[:, 0].unsqueeze(0).unsqueeze(2),\n -10000,\n )\n\n return attn_weights\n\n def compute_attention_update(\n self,\n x,\n attn_probs,\n ):\n num_rows, num_cols, batch_size, embed_dim = x.size()\n v = self.v_proj(x).view(num_rows, num_cols, batch_size, self.num_heads, self.head_dim)\n context = torch.einsum(f\"{self.attn_shape},rjnhd->rinhd\", attn_probs, v)\n context = context.contiguous().view(num_rows, num_cols, batch_size, embed_dim)\n output = self.out_proj(context)\n return output\n\n def forward(\n self,\n x,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n ):\n num_rows, num_cols, batch_size, embed_dim = x.size()\n if (num_rows * num_cols > self.max_tokens_per_msa) and not torch.is_grad_enabled():\n 
return self._batched_forward(x, self_attn_mask, self_attn_padding_mask)\n else:\n scaling = self.align_scaling(x)\n attn_weights = self.compute_attention_weights(\n x, scaling, self_attn_mask, self_attn_padding_mask\n )\n attn_probs = attn_weights.softmax(-1)\n attn_probs = self.dropout_module(attn_probs)\n output = self.compute_attention_update(x, attn_probs)\n return output, attn_probs" } ]
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Tuple, Optional
from .multihead_attention import MultiheadAttention  # noqa
from .axial_attention import ColumnSelfAttention, RowSelfAttention
from apex.normalization import FusedLayerNorm as _FusedLayerNorm
from torch.nn import LayerNorm as ESM1bLayerNorm
7,565
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.


def gelu(x):
    """Implementation of the gelu activation function.

    For information: OpenAI GPT's gelu is slightly different
    (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    """
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


def symmetrize(x):
    "Make layer symmetric in final two dimensions, used for contact prediction."
    return x + x.transpose(-1, -2)


def apc(x):
    "Perform average product correct, used for contact prediction."
    a1 = x.sum(-1, keepdims=True)
    a2 = x.sum(-2, keepdims=True)
    a12 = x.sum((-1, -2), keepdims=True)
    avg = a1 * a2
    avg.div_(a12)  # in-place to reduce memory
    normalized = x - avg
    return normalized


class ESM1LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-12, affine=True):
        """Construct a layernorm layer in the TF style (eps inside the sqrt)."""
        super().__init__()
        self.hidden_size = (hidden_size,) if isinstance(hidden_size, int) else tuple(hidden_size)
        self.eps = eps
        self.affine = bool(affine)
        if self.affine:
            self.weight = nn.Parameter(torch.ones(hidden_size))
            self.bias = nn.Parameter(torch.zeros(hidden_size))
        else:
            self.weight, self.bias = None, None

    def forward(self, x):
        dims = tuple(-(i + 1) for i in range(len(self.hidden_size)))
        means = x.mean(dims, keepdim=True)
        x_zeromean = x - means
        variances = x_zeromean.pow(2).mean(dims, keepdim=True)
        x = x_zeromean / torch.sqrt(variances + self.eps)
        if self.affine:
            x = (self.weight * x) + self.bias
        return x


try:
    class ESM1bLayerNorm(_FusedLayerNorm):
        @torch.jit.unused
        def forward(self, x):
            if not x.is_cuda:
                return super().forward(x)
            else:
                with torch.cuda.device(x.device):
                    return super().forward(x)

except ImportError:


class TransformerLayer(nn.Module):
    """Transformer layer block."""

    def __init__(
        self,
        embed_dim,
        ffn_embed_dim,
        attention_heads,
        add_bias_kv=True,
        use_esm1b_layer_norm=False,
        use_rotary_embeddings: bool = False,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.ffn_embed_dim = ffn_embed_dim
        self.attention_heads = attention_heads
        self.use_rotary_embeddings = use_rotary_embeddings
        self._init_submodules(add_bias_kv, use_esm1b_layer_norm)

    def _init_submodules(self, add_bias_kv, use_esm1b_layer_norm):
        BertLayerNorm = ESM1bLayerNorm if use_esm1b_layer_norm else ESM1LayerNorm
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.


def gelu(x):
    """Implementation of the gelu activation function.

    For information: OpenAI GPT's gelu is slightly different
    (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    """
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


def symmetrize(x):
    "Make layer symmetric in final two dimensions, used for contact prediction."
    return x + x.transpose(-1, -2)


def apc(x):
    "Perform average product correct, used for contact prediction."
    a1 = x.sum(-1, keepdims=True)
    a2 = x.sum(-2, keepdims=True)
    a12 = x.sum((-1, -2), keepdims=True)
    avg = a1 * a2
    avg.div_(a12)  # in-place to reduce memory
    normalized = x - avg
    return normalized


class ESM1LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-12, affine=True):
        """Construct a layernorm layer in the TF style (eps inside the sqrt)."""
        super().__init__()
        self.hidden_size = (hidden_size,) if isinstance(hidden_size, int) else tuple(hidden_size)
        self.eps = eps
        self.affine = bool(affine)
        if self.affine:
            self.weight = nn.Parameter(torch.ones(hidden_size))
            self.bias = nn.Parameter(torch.zeros(hidden_size))
        else:
            self.weight, self.bias = None, None

    def forward(self, x):
        dims = tuple(-(i + 1) for i in range(len(self.hidden_size)))
        means = x.mean(dims, keepdim=True)
        x_zeromean = x - means
        variances = x_zeromean.pow(2).mean(dims, keepdim=True)
        x = x_zeromean / torch.sqrt(variances + self.eps)
        if self.affine:
            x = (self.weight * x) + self.bias
        return x


try:
    class ESM1bLayerNorm(_FusedLayerNorm):
        @torch.jit.unused
        def forward(self, x):
            if not x.is_cuda:
                return super().forward(x)
            else:
                with torch.cuda.device(x.device):
                    return super().forward(x)

except ImportError:


class TransformerLayer(nn.Module):
    """Transformer layer block."""

    def __init__(
        self,
        embed_dim,
        ffn_embed_dim,
        attention_heads,
        add_bias_kv=True,
        use_esm1b_layer_norm=False,
        use_rotary_embeddings: bool = False,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.ffn_embed_dim = ffn_embed_dim
        self.attention_heads = attention_heads
        self.use_rotary_embeddings = use_rotary_embeddings
        self._init_submodules(add_bias_kv, use_esm1b_layer_norm)

    def _init_submodules(self, add_bias_kv, use_esm1b_layer_norm):
        BertLayerNorm = ESM1bLayerNorm if use_esm1b_layer_norm else ESM1LayerNorm
self.self_attn = MultiheadAttention(
0
2023-12-09 11:42:34+00:00
12k
ChatClue/ChatClue
osiris.py
[ { "identifier": "OSHelper", "path": "utils/os/helpers.py", "snippet": "class OSHelper:\n \"\"\"\n Provides utility methods for operating system level operations, particularly file management.\n\n This class includes static methods for performing various file system tasks such as cleaning up orphaned files and retrieving files.\n \"\"\"\n\n @staticmethod\n def find_closest_image(directory, target_time):\n \"\"\"\n Finds the closest image file in a directory based on the target time.\n\n This function searches through all JPG files in the specified directory and \n selects the one whose creation time is closest to, but not earlier than, \n the target time.\n\n Args:\n directory (str): The directory path where the image files are stored.\n target_time (float): The target time (in seconds since epoch) to compare the file creation times against.\n\n Returns:\n str: The path of the closest image file. Returns None if no suitable file is found.\n \"\"\"\n closest_file = None\n closest_time_diff = None\n\n # Iterate over each file in the specified directory\n for filename in os.listdir(directory):\n if filename.lower().endswith(\".jpg\"): # Check if the file is a JPG image\n filepath = os.path.join(directory, filename)\n filetime = os.path.getmtime(filepath) # Get the modification time of the file\n # Check if the file's time is later than the target time and if it's the closest so far\n if filetime > target_time:\n logging.info(f\"File is close: {filepath} - Time: {filetime}\")\n time_diff = filetime - target_time\n if closest_time_diff is None or time_diff < closest_time_diff:\n closest_file = filepath\n closest_time_diff = time_diff\n return closest_file\n\n @staticmethod\n def convert_image_to_base64(filepath):\n \"\"\"\n Converts an image file to a Base64 encoded string.\n\n This function reads the image file from the given filepath, encodes it in Base64,\n and then decodes it to a UTF-8 string, which can be easily used for data transfer \n or embedding in web pages.\n\n Args:\n filepath (str): The path of the image file to be converted.\n\n Returns:\n str: The Base64 encoded string of the image.\n \"\"\"\n with open(filepath, \"rb\") as image_file:\n # Read the file and encode it in Base64\n return base64.b64encode(image_file.read()).decode(\"utf-8\")\n\n @staticmethod\n def clear_orphaned_audio_files():\n \"\"\"\n Removes all audio files in a specific directory.\n\n This method is used to clear out any leftover audio files in the 'tmp/audio' directory. \n It iterates through all files in the specified directory and deletes them.\n \"\"\"\n # Specify the directory path for audio files\n directory_path = 'tmp/audio'\n\n # Iterate through and remove each file in the directory\n for filename in os.listdir(directory_path):\n file_path = os.path.join(directory_path, filename)\n try:\n os.remove(file_path)\n logging.info(f\"Removed file: {file_path}\")\n except OSError as e:\n logging.info(f\"Error removing file {file_path}: {e}\")\n \n @staticmethod\n def clear_orphaned_video_files():\n \"\"\"\n Removes all video files in a specific directory.\n\n This method is used to clear out any leftover video files in the 'tmp/video' directory. 
\n It iterates through all files in the specified directory and deletes them.\n \"\"\"\n # Specify the directory path for video files\n directory_path = 'tmp/video'\n\n # Iterate through and remove each file in the directory\n for filename in os.listdir(directory_path):\n file_path = os.path.join(directory_path, filename)\n try:\n os.remove(file_path)\n logging.info(f\"Removed file: {file_path}\")\n except OSError as e:\n logging.info(f\"Error removing file {file_path}: {e}\")\n\n @staticmethod\n def system_file_cleanup():\n \"\"\"\n Performs a general cleanup of system files.\n\n Currently, this method focuses on clearing orphaned audio files but can be expanded to include other cleanup tasks.\n \"\"\"\n # Clear orphaned audio files\n OSHelper.clear_orphaned_audio_files()\n OSHelper.clear_orphaned_video_files()\n \n @staticmethod\n def configure_tmp_directories():\n \"\"\"\n Ensures that the required directories (tmp/audio and tmp/video) exist.\n Creates them if they do not exist.\n \"\"\"\n directories = ['tmp/audio', 'tmp/video']\n for directory in directories:\n os.makedirs(directory, exist_ok=True)\n logging.info(f\"Checked and ensured directory exists: {directory}\")" }, { "identifier": "get_celery_app", "path": "celery_config.py", "snippet": "def get_celery_app():\n return celery_app" }, { "identifier": "DatabaseSetup", "path": "database/setup.py", "snippet": "class DatabaseSetup:\n \"\"\"\n This class is responsible for database setup tasks, particularly\n for ensuring that all defined tables in SQLAlchemy models are created in the database.\n \"\"\"\n\n @staticmethod\n def initial_setup():\n \"\"\"\n Creates tables in the database based on the SQLAlchemy models.\n\n This method uses the SQLAlchemy engine to connect to the database and creates\n any tables that haven't been created yet as defined in the SQLAlchemy model classes.\n It's intended to be run during the initial setup phase of the application.\n \"\"\"\n\n # Obtain the SQLAlchemy engine\n engine = get_engine()\n\n # Ensure vector extension is enabled.\n with engine.begin() as connection:\n # Create extension 'pgvector' if it is not created yet\n # Remember, you may need to install pgvector on your system before this will work properly.\n # https://github.com/pgvector/pgvector.git for instructions.\n connection.execute(text(\"CREATE EXTENSION IF NOT EXISTS vector\"))\n\n # Create all tables in the database defined in the SQLAlchemy models\n # This will have no effect on existing tables that match the model definitions\n Base.metadata.create_all(engine)" }, { "identifier": "broadcaster", "path": "broadcast/broadcaster.py", "snippet": "class Broadcaster:\n def __init__(self):\n def send_message(self, message):\n def start(self):\n def shutdown(self):" }, { "identifier": "AudioProcessor", "path": "audio/audio_processor.py", "snippet": "class AudioProcessor:\n \"\"\"\n A class to handle audio processing, including capturing audio input, \n processing it with Vosk for speech recognition, and responding using OpenAI's GPT model.\n\n Attributes:\n model (Vosk.Model): Vosk speech recognition model.\n samplerate (int): The sample rate for audio capture.\n device (str): The name of the audio input device.\n blocksize (int): The block size for audio processing.\n dump_filename (str): Filename to dump the audio input, if provided.\n \"\"\"\n\n def __init__(self):\n self.model = Model(lang=AUDIO_SETTINGS.get('VOSK_MODEL', \"en-us\"))\n self.samplerate = AUDIO_SETTINGS.get('SOUND_DEVICE_SAMPLERATE')\n self.device = 
AUDIO_SETTINGS.get('SOUND_DEVICE_DEVICE')\n self.blocksize = AUDIO_SETTINGS.get('SOUND_DEVICE_BLOCK_SIZE', 28000)\n self.dump_filename = AUDIO_SETTINGS.get('AUDIO_IN_DUMP_FILENAME')\n self.audio_queue = queue.Queue()\n self.openai_client = OpenAIClient()\n self.openai_conversation_builder = OpenAIConversationBuilder()\n self.tool_processor = ToolProcessor()\n self.broadcaster = broadcaster\n self.audio_out = get_audio_out()\n self.audio_out_response_buffer = ''\n self.full_assistant_response = ''\n self.last_wake_time = 0\n self.last_response_end_time = 0\n self.processing_openai_request = False\n self.shutdown_event = threading.Event()\n\n def open_dump_file(self):\n \"\"\"Opens the file to dump audio input if a filename is provided.\"\"\"\n if self.dump_filename is not None:\n self.dump_filename = open(self.dump_filename, \"wb\")\n\n def close_dump_file(self):\n \"\"\"Closes the audio dump file if it was opened.\"\"\"\n if self.dump_filename is not None:\n self.dump_filename.close()\n\n def should_process(self, result, current_time):\n \"\"\"\n Determines whether the robot should process the input based on wake phrases or elapsed time.\n\n Args:\n result (str): The recognized text from the audio input.\n current_time (float): The current time in seconds.\n\n Returns:\n bool: True if the input should be processed, False otherwise.\n \"\"\"\n return (not contains_quiet_please_phrase(result) and contains_wake_phrase(result)) or \\\n (not contains_quiet_please_phrase(result) and (current_time - self.last_wake_time <= 10) or (current_time - self.last_response_end_time <= 10) and not self.audio_out.is_playing) \\\n\n def update_wake_time(self):\n \"\"\"Updates the time when a wake phrase was last heard.\"\"\"\n self.last_wake_time = time.time()\n self.save_system_state()\n\n def update_response_end_time(self):\n \"\"\"Updates the time when the robot's last response ended.\"\"\"\n self.last_response_end_time = time.time()\n\n def callback(self, indata, frames, time, status):\n \"\"\"\n Callback function for audio input stream.\n\n Args:\n indata: The buffer containing the incoming sound.\n frames: The number of frames.\n time: Current stream time.\n status: Status of the stream.\n \"\"\"\n if status:\n logging.warning(status)\n self.audio_queue.put(bytes(indata))\n\n def process_stream(self):\n \"\"\"\n Processes the audio stream by recognizing speech and generating responses.\n\n Continuously captures audio, performs speech recognition, and generates responses using OpenAI.\n \"\"\"\n self.open_dump_file()\n try:\n with sd.RawInputStream(samplerate=self.samplerate, blocksize=self.blocksize, device=self.device,\n dtype=\"int16\", channels=1, callback=self.callback):\n rec = KaldiRecognizer(self.model, self.samplerate)\n openai_stream_thread = None\n\n while not self.shutdown_event.is_set():\n data, current_time = self.get_audio_data()\n result = self.process_recognition(data, rec)\n\n if result:\n openai_stream_thread = self.handle_speech(result, openai_stream_thread, current_time)\n\n self.handle_partial_results(rec)\n self.write_to_dump_file(data)\n self.process_openai_response()\n\n # except Exception as e:\n # logging.error(f\"An error occurred: {e}\")\n finally:\n self.close_dump_file()\n\n def get_audio_data(self):\n \"\"\"\n Retrieves audio data from the queue.\n\n Returns:\n tuple: A tuple containing the audio data and the current time.\n \"\"\"\n data = self.audio_queue.get()\n current_time = time.time()\n return data, current_time\n\n def process_recognition(self, data, rec):\n 
\"\"\"\n Processes the recognition of speech from audio data.\n\n Args:\n data: The audio data to be processed.\n rec (KaldiRecognizer): The Vosk recognizer instance.\n\n Returns:\n str or None: Recognized text or None if no significant speech is recognized.\n \"\"\"\n if rec.AcceptWaveform(data):\n result = json.loads(rec.Result())[\"text\"]\n if result not in ['', 'huh']:\n self.broadcaster.send_message(result)\n logging.info(\"ROBOT HEARD: \" + result)\n return result\n return None\n\n def handle_speech(self, result, openai_stream_thread, current_time):\n \"\"\"\n Processes the recognized speech and determines the appropriate response.\n\n Args:\n result (str): Recognized speech text.\n openai_stream_thread (threading.Thread): The current OpenAI stream thread.\n current_time (float): Current time in seconds.\n\n Returns:\n threading.Thread: Updated or new OpenAI stream thread.\n \"\"\"\n try:\n if self.should_process(result, current_time) and not self.processing_openai_request:\n self.update_wake_time()\n self.processing_openai_request = True\n if not openai_stream_thread or not openai_stream_thread.is_alive():\n self.openai_client.stop_signal.clear()\n is_tool_request, conversation = self.determine_tool_request(result)\n if is_tool_request:\n self.handle_tool_request(result, conversation)\n else:\n self.continue_conversation(result, conversation)\n else:\n logging.info(\"ROBOT THOUGHT: Ignoring Conversation, it doesn't appear to be relevant.\")\n finally:\n self.processing_openai_request = False\n return openai_stream_thread\n \n \n def determine_tool_request(self, result):\n \"\"\"\n Determines whether the given input text is a tool request.\n\n Args:\n result (str): The recognized text to evaluate.\n\n Returns:\n Tuple[bool, list]: A tuple containing a boolean indicating whether it's a tool request, \n and the conversation array for further processing.\n \"\"\"\n call_type_messages = self.openai_conversation_builder.create_check_if_tool_call_messages(result)\n openai_is_tool_response = self.openai_client.create_completion(call_type_messages, False, {\"type\": \"json_object\"}, openai_functions, True)\n \n is_tool_request = False\n conversation = self.openai_conversation_builder.create_recent_conversation_messages_array(result)\n\n try:\n if openai_is_tool_response and openai_is_tool_response.choices:\n is_tool_request = json.loads(openai_is_tool_response.choices[0].message.content).get(\"is_tool\", False)\n except (TypeError, AttributeError, json.JSONDecodeError):\n print(\"Error parsing OpenAI response or response not in expected format.\")\n\n return is_tool_request, conversation\n\n def handle_tool_request(self, result, conversation):\n \"\"\"\n Handles the processing of a tool request.\n\n Args:\n result (str): The recognized text.\n conversation (list): The conversation array built up to this point.\n \"\"\"\n tool_response = self.openai_client.create_completion(conversation, False, None, openai_functions)\n tool_response_message = tool_response.choices[0].message \n tool_calls = tool_response_message.tool_calls \n if tool_calls:\n self.process_tool_calls(tool_calls, result, conversation, tool_response_message)\n else:\n self.continue_conversation(result, conversation)\n\n def process_tool_calls(self, tool_calls, result, conversation, tool_response_message):\n \"\"\"\n Processes the tool calls received from OpenAI.\n\n Args:\n tool_calls (list): List of tool calls from OpenAI response.\n result (str): The recognized text.\n conversation (list): The conversation array.\n 
tool_response_message (Message): The tool response message from OpenAI.\n \"\"\"\n tool_call = tool_calls[0]\n tool_processor_response = self.tool_processor.process_tool_request(tool_call)\n if tool_processor_response[\"success\"]:\n self.handle_successful_tool_response(tool_processor_response, result, conversation, tool_response_message)\n else:\n self.audio_out.add_to_queue(get_tool_not_found_phrase())\n\n def handle_successful_tool_response(self, tool_processor_response, result, conversation, tool_response_message):\n \"\"\"\n Handles a successful tool response.\n\n Args:\n tool_processor_response (dict): The response from the tool processor.\n result (str): The recognized text.\n conversation (list): The conversation array.\n tool_response_message (Message): The tool response message from OpenAI.\n \"\"\"\n if tool_processor_response[\"is_conversational\"]:\n conversation.append(tool_response_message)\n tool_call_response_message = self.openai_conversation_builder.create_tool_call_response_message(tool_processor_response)\n conversation.append(tool_call_response_message)\n openai_stream_thread = threading.Thread(target=self.openai_client.stream_response, args=(conversation,))\n openai_stream_thread.start()\n else:\n self.store_conversation(speaker_type=CONVERSATIONS_CONFIG[\"user\"], response=result)\n\n def continue_conversation(self, result, conversation):\n \"\"\"\n Continues the conversation with OpenAI based on the given result.\n\n Args:\n result (str): The recognized text to continue the conversation with.\n conversation (list): The existing conversation array.\n \"\"\"\n self.openai_client.stop_processing_request()\n conversation = self.openai_conversation_builder.create_recent_conversation_messages_array(result)\n openai_stream_thread = threading.Thread(target=self.openai_client.stream_response, args=(conversation,))\n openai_stream_thread.start()\n logging.info(\"ROBOT ACTION: Committing user input to memory.\")\n self.store_conversation(speaker_type=CONVERSATIONS_CONFIG[\"user\"], response=result)\n\n\n def handle_partial_results(self, rec):\n \"\"\"\n Handles partial results from speech recognition.\n\n Args:\n rec (KaldiRecognizer): The Vosk recognizer instance.\n \"\"\"\n partial_result_json = json.loads(rec.PartialResult())\n if 'partial' in partial_result_json and contains_quiet_please_phrase(partial_result_json['partial']):\n self.stop_conversation_and_audio()\n\n def stop_conversation_and_audio(self):\n \"\"\"\n Stops the conversation and any ongoing audio processing.\n \"\"\"\n logging.info(\"ROBOT THOUGHT: Request to stop talking recognized. 
Stopping stream.\")\n self.stop_all_audio()\n if self.full_assistant_response:\n logging.info(\"ROBOT ACTION: Committing my partial response to memory\")\n self.store_full_assistant_response()\n\n def stop_all_audio(self):\n self.audio_out_response_buffer = ''\n self.openai_client.stop_processing_request()\n self.audio_out.stop_all_audio()\n\n def write_to_dump_file(self, data):\n \"\"\"\n Writes audio data to the dump file if it's open.\n\n Args:\n data: The audio data to be written to the file.\n \"\"\"\n if self.dump_filename is not None:\n self.dump_filename.write(data)\n\n def process_openai_response(self):\n \"\"\"\n Processes responses from OpenAI's GPT model.\n\n Retrieves and handles the responses generated by OpenAI.\n \"\"\"\n while not self.openai_client.response_queue.empty():\n chunk = self.openai_client.response_queue.get()\n if chunk.choices[0].delta.content is not None:\n response_text = chunk.choices[0].delta.content\n print(response_text, end='', flush=True)\n self.update_response_end_time()\n self.audio_out_response_buffer += response_text\n if self.audio_out_response_buffer.endswith(('.', '?', '!', ';')):\n self.audio_out.add_to_queue(self.audio_out_response_buffer)\n self.audio_out_response_buffer = \"\"\n self.full_assistant_response += response_text\n\n if self.full_assistant_response and self.openai_client.streaming_complete:\n logging.info(\"ROBOT ACTION: Committing my full response to memory\")\n self.store_full_assistant_response()\n\n def store_full_assistant_response(self):\n \"\"\"\n Stores the full assistant response in the database.\n \"\"\"\n self.store_conversation(speaker_type=CONVERSATIONS_CONFIG[\"assistant\"], response=self.full_assistant_response)\n self.full_assistant_response = ''\n\n def store_conversation(self, speaker_type, response):\n \"\"\"\n Stores the conversation part in the database asynchronously using a Celery task.\n\n Args:\n speakerType (str): \"user\" or \"assistant\", indicating who is speaking.\n response (str): The text of the response.\n \"\"\"\n get_celery_app().send_task('background.memory.tasks.store_conversation_task', args=[speaker_type, response])\n logging.info(\"Store conversation task submitted to background\")\n \n def save_system_state(self):\n \"\"\"\n Saves the system state in the database asynchronously using a Celery task.\n \"\"\"\n get_celery_app().send_task('background.memory.tasks.update_system_state_task', args=[self.last_wake_time])\n logging.info(\"Update system state task submitted to background\")\n\n def shutdown(self):\n self.shutdown_event.set()" }, { "identifier": "VideoProcessor", "path": "video/video_processor.py", "snippet": "class VideoProcessor:\n \"\"\"\n A class to handle video processing, including capturing video input and \n processing it with MediaPipe for pose estimation.\n \"\"\"\n\n def __init__(self):\n # MediaPipe Pose solution initialization\n self.mp_pose = mp.solutions.pose\n self.pose = self.mp_pose.Pose()\n self.cap = None\n\n # Video capture settings\n self.frame_rate = VIDEO_SETTINGS.get('FRAME_RATE', 30)\n self.device = VIDEO_SETTINGS.get('VIDEO_DEVICE', 0)\n self.capture_interval = VIDEO_SETTINGS.get('CAPTURE_INTERVAL', 1)\n self.frame_counter = 0\n self.last_capture_time = time.time()\n self.frame_queue = queue.Queue()\n\n # Check and create tmp directory for storing frames\n self.tmp_folder = 'tmp/video'\n if not os.path.exists(self.tmp_folder):\n os.makedirs(self.tmp_folder)\n\n self.shutdown_event = threading.Event()\n\n def process_stream(self):\n \"\"\"\n Captures and 
processes the video stream.\n \"\"\"\n if VIDEO_SETTINGS.get('CAPTURE_VIDEO', False):\n self.cap = cv2.VideoCapture(self.device)\n\n while not self.shutdown_event.is_set():\n ret, frame = self.cap.read()\n if not ret:\n continue\n\n # Process the frame\n #self.process_frame(frame)\n\n # Capture frames at a set interval for saving\n if time.time() - self.last_capture_time > self.capture_interval:\n frame_name = os.path.join(self.tmp_folder, f\"frame_{self.frame_counter}.jpg\")\n cv2.imwrite(frame_name, frame)\n logging.debug(f\"Frame saved as {frame_name}\")\n self.frame_counter += 1\n self.last_capture_time = time.time()\n\n self.clean_up()\n \n def clean_up(self):\n \"\"\"\n Releases resources and closes windows.\n \"\"\"\n if self.cap:\n self.cap.release()\n cv2.destroyAllWindows()\n OSHelper.clear_orphaned_video_files()\n\n def process_frame(self, frame):\n \"\"\"\n Processes a single video frame.\n \"\"\"\n self.frame_queue.put(frame)\n frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n results = self.pose.process(frame_rgb)\n\n if results.pose_landmarks:\n # Draw pose landmarks\n mp.solutions.drawing_utils.draw_landmarks(frame, results.pose_landmarks, self.mp_pose.POSE_CONNECTIONS)\n # Additional processing can be added here\n \n def shutdown(self):\n \"\"\"\n Signals the thread to terminate.\n \"\"\"\n self.shutdown_event.set()" }, { "identifier": "get_audio_out", "path": "audio/audio_out.py", "snippet": "def get_audio_out():\n \"\"\"\n Returns the instance of AudioOutput for use.\n\n Returns:\n AudioOutput: The instance of the AudioOutput class.\n \"\"\"\n return audio_out" }, { "identifier": "OSHelper", "path": "utils/os/helpers.py", "snippet": "class OSHelper:\n \"\"\"\n Provides utility methods for operating system level operations, particularly file management.\n\n This class includes static methods for performing various file system tasks such as cleaning up orphaned files and retrieving files.\n \"\"\"\n\n @staticmethod\n def find_closest_image(directory, target_time):\n \"\"\"\n Finds the closest image file in a directory based on the target time.\n\n This function searches through all JPG files in the specified directory and \n selects the one whose creation time is closest to, but not earlier than, \n the target time.\n\n Args:\n directory (str): The directory path where the image files are stored.\n target_time (float): The target time (in seconds since epoch) to compare the file creation times against.\n\n Returns:\n str: The path of the closest image file. 
Returns None if no suitable file is found.\n \"\"\"\n closest_file = None\n closest_time_diff = None\n\n # Iterate over each file in the specified directory\n for filename in os.listdir(directory):\n if filename.lower().endswith(\".jpg\"): # Check if the file is a JPG image\n filepath = os.path.join(directory, filename)\n filetime = os.path.getmtime(filepath) # Get the modification time of the file\n # Check if the file's time is later than the target time and if it's the closest so far\n if filetime > target_time:\n logging.info(f\"File is close: {filepath} - Time: {filetime}\")\n time_diff = filetime - target_time\n if closest_time_diff is None or time_diff < closest_time_diff:\n closest_file = filepath\n closest_time_diff = time_diff\n return closest_file\n\n @staticmethod\n def convert_image_to_base64(filepath):\n \"\"\"\n Converts an image file to a Base64 encoded string.\n\n This function reads the image file from the given filepath, encodes it in Base64,\n and then decodes it to a UTF-8 string, which can be easily used for data transfer \n or embedding in web pages.\n\n Args:\n filepath (str): The path of the image file to be converted.\n\n Returns:\n str: The Base64 encoded string of the image.\n \"\"\"\n with open(filepath, \"rb\") as image_file:\n # Read the file and encode it in Base64\n return base64.b64encode(image_file.read()).decode(\"utf-8\")\n\n @staticmethod\n def clear_orphaned_audio_files():\n \"\"\"\n Removes all audio files in a specific directory.\n\n This method is used to clear out any leftover audio files in the 'tmp/audio' directory. \n It iterates through all files in the specified directory and deletes them.\n \"\"\"\n # Specify the directory path for audio files\n directory_path = 'tmp/audio'\n\n # Iterate through and remove each file in the directory\n for filename in os.listdir(directory_path):\n file_path = os.path.join(directory_path, filename)\n try:\n os.remove(file_path)\n logging.info(f\"Removed file: {file_path}\")\n except OSError as e:\n logging.info(f\"Error removing file {file_path}: {e}\")\n \n @staticmethod\n def clear_orphaned_video_files():\n \"\"\"\n Removes all video files in a specific directory.\n\n This method is used to clear out any leftover video files in the 'tmp/video' directory. 
\n It iterates through all files in the specified directory and deletes them.\n \"\"\"\n # Specify the directory path for video files\n directory_path = 'tmp/video'\n\n # Iterate through and remove each file in the directory\n for filename in os.listdir(directory_path):\n file_path = os.path.join(directory_path, filename)\n try:\n os.remove(file_path)\n logging.info(f\"Removed file: {file_path}\")\n except OSError as e:\n logging.info(f\"Error removing file {file_path}: {e}\")\n\n @staticmethod\n def system_file_cleanup():\n \"\"\"\n Performs a general cleanup of system files.\n\n Currently, this method focuses on clearing orphaned audio files but can be expanded to include other cleanup tasks.\n \"\"\"\n # Clear orphaned audio files\n OSHelper.clear_orphaned_audio_files()\n OSHelper.clear_orphaned_video_files()\n \n @staticmethod\n def configure_tmp_directories():\n \"\"\"\n Ensures that the required directories (tmp/audio and tmp/video) exist.\n Creates them if they do not exist.\n \"\"\"\n directories = ['tmp/audio', 'tmp/video']\n for directory in directories:\n os.makedirs(directory, exist_ok=True)\n logging.info(f\"Checked and ensured directory exists: {directory}\")" }, { "identifier": "welcome_message", "path": "utils/text/welcome.py", "snippet": "def welcome_message():\n print(\"\"\"\n ChatClue: Osiris\n \n /\\_/\\ \n ( o.o ) \n > ^ <\n \n Optimized System for Integrated Real-Time Interaction and Sensing\n \"\"\")" }, { "identifier": "ColorFormatter", "path": "utils/logging/colors.py", "snippet": "class ColorFormatter(logging.Formatter):\n def format(self, record):\n levelname = record.levelname\n message = logging.Formatter.format(self, record)\n return COLORS.get(levelname, '') + message + COLORS['ENDC']" } ]
from config import CELERY_CONFIG, LOG_LEVEL, VIDEO_SETTINGS
from utils.os.helpers import OSHelper
from celery import Celery
from celery_config import get_celery_app
from database.setup import DatabaseSetup
from broadcast.broadcaster import broadcaster
from audio.audio_processor import AudioProcessor
from video.video_processor import VideoProcessor
from audio.audio_out import get_audio_out
from utils.os.helpers import OSHelper
from utils.text.welcome import welcome_message
from utils.logging.colors import ColorFormatter
from background.memory.tasks import *
from tools import *  # Import all openai tool functions
import logging
import subprocess
import atexit
import sys
import threading
import time
import cv2
import queue
7,450
# Configure basic logging for the application
logging.basicConfig(level=LOG_LEVEL)
root_logger = logging.getLogger()
for handler in root_logger.handlers:
    handler.setFormatter(ColorFormatter('%(asctime)s - %(levelname)s - %(message)s'))

# Ensure the necessary tmp/ directories exist
OSHelper.configure_tmp_directories()

# Configure background processor / subconscious systems
celery_app = get_celery_app()

# Configure audio output
audio_out = get_audio_out()


def start_celery_worker():
    """
    Starts a Celery worker as a subprocess.

    This method initiates a Celery worker using the subprocess module. The worker runs
    asynchronously and executes tasks defined in the Celery application. The worker is
    configured to log at the 'info' level for better visibility of its operations.

    The function also ensures that the Celery worker is terminated gracefully when the
    Python script exits. This is achieved using the `atexit` module, which registers a
    function to terminate the worker as part of the script's cleanup process.

    Returns:
        subprocess.Popen: The subprocess object representing the Celery worker.
    """
    # Get the log level from configuration, default to 'info'
    log_level = CELERY_CONFIG.get('LOCAL_LOG_LEVEL', 'info')

    # Start Celery worker
    celery_worker = subprocess.Popen(['celery', '-A', 'osiris.celery_app', 'worker', f'--loglevel={log_level}'])

    # Register function to terminate worker on exit
    atexit.register(lambda: celery_worker.terminate())

    return celery_worker


def stop_celery_worker(celery_worker):
    """
    Stops the Celery worker gracefully.

    Args:
        celery_worker (subprocess.Popen): The subprocess object representing the Celery worker.
    """
    if celery_worker:
        # Send SIGTERM signal to gracefully terminate the worker
        celery_worker.terminate()
        # Wait for the worker to exit
        try:
            celery_worker.wait(timeout=0.5)  # Adjust the timeout as needed
        except subprocess.TimeoutExpired:
            # If the worker doesn't terminate within the timeout, kill it
            logging.info("Forcibly terminating the Celery worker.")
            celery_worker.kill()


def main():
    """
    Main function to initialize the application.

    Configures celery background worker, database, broadcaster, and audio settings.
    """
    welcome_message()

    # Optionally start Celery worker
    celery_worker = None
    if CELERY_CONFIG.get("RUN_LOCALLY_AUTOMATICALLY", True):
        logging.info("ROBOT THOUGHT: Starting subconscious systems locally")
        celery_worker = start_celery_worker()
        logging.info("ROBOT THOUGHT: Subconscious systems activated")

    # Setup the database
    DatabaseSetup.initial_setup()

    try:
        # Initialize the audio processor with the configuration settings
        logging.info("ROBOT THOUGHT: I am ready to begin.")
        audio_out.add_to_queue("Welcome to Chat Clue's Project Osiris. I am ready to begin.")

        # Start Audio processing
        audio_processor = AudioProcessor()
        audio_thread = threading.Thread(target=audio_processor.process_stream)
        audio_thread.start()

        # Start Video processing
# Configure basic logging for the application
logging.basicConfig(level=LOG_LEVEL)
root_logger = logging.getLogger()
for handler in root_logger.handlers:
    handler.setFormatter(ColorFormatter('%(asctime)s - %(levelname)s - %(message)s'))

# Ensure the necessary tmp/ directories exist
OSHelper.configure_tmp_directories()

# Configure background processor / subconscious systems
celery_app = get_celery_app()

# Configure audio output
audio_out = get_audio_out()


def start_celery_worker():
    """
    Starts a Celery worker as a subprocess.

    This method initiates a Celery worker using the subprocess module. The worker runs
    asynchronously and executes tasks defined in the Celery application. The worker is
    configured to log at the 'info' level for better visibility of its operations.

    The function also ensures that the Celery worker is terminated gracefully when the
    Python script exits. This is achieved using the `atexit` module, which registers a
    function to terminate the worker as part of the script's cleanup process.

    Returns:
        subprocess.Popen: The subprocess object representing the Celery worker.
    """
    # Get the log level from configuration, default to 'info'
    log_level = CELERY_CONFIG.get('LOCAL_LOG_LEVEL', 'info')

    # Start Celery worker
    celery_worker = subprocess.Popen(['celery', '-A', 'osiris.celery_app', 'worker', f'--loglevel={log_level}'])

    # Register function to terminate worker on exit
    atexit.register(lambda: celery_worker.terminate())

    return celery_worker


def stop_celery_worker(celery_worker):
    """
    Stops the Celery worker gracefully.

    Args:
        celery_worker (subprocess.Popen): The subprocess object representing the Celery worker.
    """
    if celery_worker:
        # Send SIGTERM signal to gracefully terminate the worker
        celery_worker.terminate()
        # Wait for the worker to exit
        try:
            celery_worker.wait(timeout=0.5)  # Adjust the timeout as needed
        except subprocess.TimeoutExpired:
            # If the worker doesn't terminate within the timeout, kill it
            logging.info("Forcibly terminating the Celery worker.")
            celery_worker.kill()


def main():
    """
    Main function to initialize the application.

    Configures celery background worker, database, broadcaster, and audio settings.
    """
    welcome_message()

    # Optionally start Celery worker
    celery_worker = None
    if CELERY_CONFIG.get("RUN_LOCALLY_AUTOMATICALLY", True):
        logging.info("ROBOT THOUGHT: Starting subconscious systems locally")
        celery_worker = start_celery_worker()
        logging.info("ROBOT THOUGHT: Subconscious systems activated")

    # Setup the database
    DatabaseSetup.initial_setup()

    try:
        # Initialize the audio processor with the configuration settings
        logging.info("ROBOT THOUGHT: I am ready to begin.")
        audio_out.add_to_queue("Welcome to Chat Clue's Project Osiris. I am ready to begin.")

        # Start Audio processing
        audio_processor = AudioProcessor()
        audio_thread = threading.Thread(target=audio_processor.process_stream)
        audio_thread.start()

        # Start Video processing
video_processor = VideoProcessor()
5
2023-12-06 09:10:06+00:00
12k
lumina-test/lumina
lumina/e2e_test/test_cnp.py
[ { "identifier": "get_qp_info_list", "path": "lumina/analyzer/main.py", "snippet": "LOG_FILENAME = \"analysis.log\"\nRESULT_FILENAME = \"result.out\"\ndef get_qp_info_list(switch_msg_snapshot):\ndef main(args):\ndef parse_args():" }, { "identifier": "Orchestrator", "path": "lumina/orchestrator/main.py", "snippet": "class Orchestrator:\n \"\"\" Class to manage the experiment \"\"\"\n def __init__(self, config_file):\n \"\"\" Constructor for Orchestrator class\n\n Args:\n config_file (str): path to the yaml (config) file.\n The file contains configs for switch, requester, responder, traffic, etc.\n\n Returns:\n N/A\n \"\"\"\n with open(config_file, \"r\") as stream:\n conf = yaml.safe_load(stream)\n try:\n local_workspace = conf['local-workspace']\n result_path = conf['result-path']\n switch_conf = conf['switch']\n requester_conf = conf['requester']\n responder_conf = conf['responder']\n requester_mirror_conf = conf['requester-mirror']\n responder_mirror_conf = conf['responder-mirror']\n traffic_conf = conf['traffic']\n rewrite_udp_dst_port = conf['rewrite-udp-dst-port']\n num_repeats = conf['num-repeats']\n agg_pcap_filename = conf['aggregate-pcap-filename']\n except KeyError as e:\n print(\"Config file %s has a bad yaml format (key error: %s)\" % (config_file, e))\n sys.exit(-1)\n\n switch_conf['rewrite-udp-dst-port'] = rewrite_udp_dst_port\n requester_mirror_conf['pkt-dump-conf']['rewrite-udp-dst-port'] = rewrite_udp_dst_port\n responder_mirror_conf['pkt-dump-conf']['rewrite-udp-dst-port'] = rewrite_udp_dst_port\n\n self.local_workspace = local_workspace\n self.result_path = result_path\n self.traffic_conf = traffic_conf\n self.num_repeats = num_repeats\n self.switch = switch.Switch(switch_conf)\n self.requester = host.RDMAHost(requester_conf)\n self.responder = host.RDMAHost(responder_conf)\n self.requester_mirror = host.MirrorHost(requester_mirror_conf)\n self.responder_mirror = host.MirrorHost(responder_mirror_conf)\n self.aggregate_pcap_filename = agg_pcap_filename\n\n cmd = \"mkdir -p %s\" % self.result_path\n subprocess.call(cmd, shell = True)\n\n def rm_old_files(self):\n \"\"\" Remove result files left by previous experiments \"\"\"\n old_iter_id = 0\n old_iter_result_path = os.path.join(self.result_path, str(old_iter_id))\n\n while os.path.exists(old_iter_result_path) and not os.path.isfile(old_iter_result_path):\n cmd = \"rm -rf %s\" % (old_iter_result_path)\n subprocess.call(cmd, shell=True)\n\n old_iter_id += 1\n old_iter_result_path = os.path.join(self.result_path, str(old_iter_id))\n\n def get_requester_ip_list(self):\n \"\"\" Return the list of requester IP addresses (without prefix length info) \"\"\"\n return [x.split('/')[0] for x in self.requester.conf['nic']['ip-list']]\n\n def get_responder_ip_list(self):\n \"\"\" Return the list of responder IP addresses (without prefix length info) \"\"\"\n return [x.split('/')[0] for x in self.responder.conf['nic']['ip-list']]\n\n def get_num_repeats(self):\n \"\"\" Return the number of experiment repeats \"\"\"\n return self.num_repeats\n\n def sync_and_compile(self):\n \"\"\" Syncronize and compile the code on all the hosts\n\n Returns:\n bool: True if the code is synced and compiled successfully, False otherwise\n \"\"\"\n logging.info(\"Sync and compile the code\")\n\n ## Sync and compile the switch code\n ret = self.switch.sync_and_compile(self.local_workspace,\n switch.SWITCH_PROG_DIR_NAME,\n switch.SWITCH_PROG_FILE_NAME)\n if ret == False:\n logging.error(\"Failed to sync and compile the switch code\")\n return False\n\n ## 
Sync and compile the traffic generator code\n rdma_verb = self.traffic_conf['rdma-verb'].strip().lower()\n if rdma_verb not in host.VALID_IB_VERB_LIST_LOWER:\n logging.error(\"Invalid RDMA verb: %s\" % rdma_verb)\n return False\n\n ret = self.requester.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=self.requester.traffic_gen_dir_name(),\n prog_file_name=self.requester.traffic_gen_client_name(rdma_verb))\n if ret == False:\n logging.error(\"Failed to sync and compile the traffic generator code on requester\")\n return False\n\n ret = self.responder.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=self.requester.traffic_gen_dir_name(),\n prog_file_name=self.requester.traffic_gen_server_name(rdma_verb))\n if ret == False:\n logging.error(\"Failed to sync and compile the traffic generator code on responder\")\n return False\n\n ret = self.requester.sync(local_workspace=self.local_workspace,\n prog_dir_name=host.DUMP_COUNTER_DIR_NAME)\n if ret == False:\n logging.error(\"Failed to sync the dump counter code on requester\")\n return False\n\n ret = self.responder.sync(local_workspace=self.local_workspace,\n prog_dir_name=host.DUMP_COUNTER_DIR_NAME)\n if ret == False:\n logging.error(\"Failed to sync the dump counter code on responder\")\n return False\n\n ## Sync and compile the packet capture code\n ret = self.requester_mirror.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=host.PKT_CAPTURE_DIR_NAME,\n prog_file_name=host.PKT_CAPTURE_FILE_NAME)\n if ret == False:\n logging.error(\"Failed to sync and compile the packet capture code on requester_mirror\")\n return False\n\n ret = self.responder_mirror.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=host.PKT_CAPTURE_DIR_NAME,\n prog_file_name=host.PKT_CAPTURE_FILE_NAME)\n if ret == False:\n logging.error(\"Failed to sync and compile the packet capture code on responder_mirror\")\n return False\n\n return True\n\n def generate_switch_table_config(self):\n \"\"\" Generate the switch configuration, including:\n 1. Forward table\n 2. Mirror table\n 3. ARP table\n 4. 
Traffic table, including the events to inject\n\n Returns:\n bool: True if the switch configuration is generated successfully, False otherwise\n \"\"\"\n requester_nic_conf = self.requester.conf['nic']\n responder_nic_conf = self.responder.conf['nic']\n requester_mirror_nic_conf = self.requester_mirror.conf['nic']\n responder_mirror_nic_conf = self.responder_mirror.conf['nic']\n\n ## Set up forward table entries\n self.switch.conf['forward-table'] = []\n try:\n for nic_conf, host_type in zip([requester_nic_conf, responder_nic_conf, \\\n requester_mirror_nic_conf, responder_mirror_nic_conf],\n ['requester', 'responder', 'requester_mirror', 'responder_mirror']):\n forward_table_entry = {'dst-mac': nic_conf['mac'],\n 'eg-port': nic_conf['switch-port'],\n 'host': host_type}\n self.switch.conf['forward-table'].append(forward_table_entry)\n except:\n logging.error(\"Failed to set forward table\")\n return False\n\n ## Set up mirror table entries, use ingress_to_egress\n try:\n requester_mirror_entry = {'direction': 'ingress_to_egress',\n 'src-port': requester_nic_conf['switch-port'],\n 'dst-port': requester_mirror_nic_conf['switch-port']}\n\n responder_mirror_entry = {'direction': 'ingress_to_egress',\n 'src-port': responder_nic_conf['switch-port'],\n 'dst-port': responder_mirror_nic_conf['switch-port']}\n self.switch.conf['mirror-table'] = [requester_mirror_entry, responder_mirror_entry]\n except:\n logging.error(\"Failed to set mirror table\")\n return False\n\n requester_mac = requester_nic_conf['mac']\n responder_mac = responder_nic_conf['mac']\n requester_ip_list = requester_nic_conf['ip-list']\n responder_ip_list = responder_nic_conf['ip-list']\n ## Set up arp table entries\n arp_entries = []\n try:\n for dst_ip_list, dst_mac in zip([requester_ip_list, responder_ip_list],\n [requester_mac, responder_mac]):\n for dst_ip_subnet in dst_ip_list:\n dst_ip = dst_ip_subnet.split('/')[0]\n arp_entries.append({'dst-ip': dst_ip, 'dst-mac': dst_mac})\n self.switch.conf['arp-table'] = arp_entries\n except:\n logging.error(\"Failed to set ARP table\")\n return False\n\n ## Generate the events of each iteration for switch config\n per_iter_event_list = self.traffic_conf['data-pkt-events']\n msg_size = self.traffic_conf['message-size']\n mtu = self.traffic_conf['mtu']\n num_msgs_per_qp = self.traffic_conf['num-msgs-per-qp']\n num_pkts_per_msg = int(math.ceil(msg_size / mtu))\n self.switch.conf['traffic'] = {}\n self.switch.conf['traffic']['num-msgs-per-qp'] = num_msgs_per_qp\n self.switch.conf['traffic']['num-pkts-per-msg'] = num_pkts_per_msg\n self.switch.conf['traffic']['data-pkt-events'] = []\n\n if per_iter_event_list is None or len(per_iter_event_list) == 0:\n ## No events at all\n return True\n\n for i in range(num_msgs_per_qp):\n for per_iter_event in per_iter_event_list:\n global_event = copy.deepcopy(per_iter_event)\n\n ## This event is applied to all the packets of the message. 
We need to expand it!\n if str(global_event['psn']).lower() == 'all':\n for psn in range(num_pkts_per_msg):\n global_event['psn'] = psn + i * num_pkts_per_msg\n self.switch.conf['traffic']['data-pkt-events'].append(copy.deepcopy(global_event))\n else:\n global_event['psn'] += i * num_pkts_per_msg\n self.switch.conf['traffic']['data-pkt-events'].append(copy.deepcopy(global_event))\n\n return True\n\n def ping_mesh(self):\n \"\"\" Ping all the IP addresses between requester and responder to check the connectivity\n\n Returns:\n bool: True if all the IP addresses can be pinged successfully, False otherwise\n \"\"\"\n for requester_ip_subnet in self.requester.conf['nic']['ip-list']:\n requester_ip = requester_ip_subnet.split('/')[0]\n command = \"ping \" + requester_ip + \" -c 5 -i 0.2\"\n ret_val, err_info, exit_status = self.responder.execute_command(command)\n if exit_status != 0:\n logging.error(\"Failed to ping ip \" + requester_ip)\n logging.error(\"[Command return info]: %s %s\" % (', '.join(ret_val), ', '.join(err_info)))\n return False\n\n for responder_ip_subnet in self.responder.conf['nic']['ip-list']:\n responder_ip = responder_ip_subnet.split('/')[0]\n command = \"ping \" + responder_ip + \" -c 5 -i 0.2\"\n ret_val, err_info, exit_status = self.requester.execute_command(command)\n if exit_status != 0:\n logging.error(\"Failed to ping ip \" + responder_ip)\n logging.error(\"[Command return info]: %s %s\" % (ret_val, err_info))\n return False\n\n logging.info(\"Successfully pinged all the IP addresses between requester and responder\")\n return True\n\n def generate_switch_config_file(self):\n \"\"\" Generate the switch configuration file and copy it to the switch\n\n Returns:\n bool: True if the switch configuration file is generated and copied successfully, False otherwise\n \"\"\"\n ## Get the mac address for all the hosts\n self.requester.get_mac_address()\n self.responder.get_mac_address()\n self.requester_mirror.get_mac_address()\n self.responder_mirror.get_mac_address()\n\n ## Generate config for Match-Action table in switch\n if self.generate_switch_table_config() == False:\n logging.error(\"Failed to generate switch table configuration\")\n return False\n\n ## Dump the switch configuration into a file, and copy it to the switch\n if self.switch.dump_controller_config(self.local_workspace) == False:\n logging.error(\"Failed to dump switch config\")\n return False\n\n return True\n\n def __is_valid_traffc(self):\n \"\"\" Check if the traffic configuration is valid, including:\n 1. The tx-depth should be 1 or > 1\n 2. 
If tx-depth > 1, then we can only inject ECN marking events\n\n Returns:\n bool: True if the traffic configuration is valid, False otherwise\n \"\"\"\n try:\n data_pkt_events = self.traffic_conf['data-pkt-events']\n tx_depth = self.traffic_conf['tx-depth']\n\n if tx_depth == 1:\n return True\n elif tx_depth <= 0:\n return False\n\n for event in data_pkt_events:\n if event['type'] != 'ecn':\n logging.error(\"Cannot inject %s event when tx depth = %d\" % (event['type'], tx_depth))\n return False\n except:\n logging.error(\"Failed to parse traffic configuration\")\n return False\n\n return True\n\n def run_experiment(self):\n \"\"\" Run the experiment\n\n Returns:\n bool: True if the experiment is completed successfully, False otherwise\n \"\"\"\n\n ## Check if traffic configuration is valid\n if self.__is_valid_traffc() == False:\n logging.error(\"Invalid traffic configuration\")\n return False\n\n ## Run switch program\n if self.switch.run_switch() == False:\n logging.error(\"Failed to run switch\")\n return False\n\n ## Sleep for 1 second to make sure control plane is listenning (for client message)\n time.sleep(1)\n\n ## Configure the servers\n if self.requester.config_traffic_gen() == False:\n logging.error(\"Failed to config RDMA requester\")\n return False\n\n if self.responder.config_traffic_gen() == False:\n logging.error(\"Failed to config RDMA responder\")\n return False\n\n if self.requester_mirror.config_packet_capture() == False:\n logging.error(\"Failed to config packet capture on requester mirror\")\n return False\n\n if self.responder_mirror.config_packet_capture() == False:\n logging.error(\"Failed to config packet capture on responder mirror\")\n return False\n\n ## Check the connectivity through pingmesh (try 5 rounds)\n num_tries = 0\n pingmesh_ret = False\n\n while num_tries < 5:\n pingmesh_ret = self.ping_mesh()\n if pingmesh_ret == True:\n break\n num_tries += 1\n time.sleep(1)\n\n if pingmesh_ret == False:\n logging.error(\"Failed to ping all the IP addresses between requester and responder\")\n return False\n\n ## Launch packet capture for both side\n ## Prerequisite: config hugepage and igb_uio if needed\n if self.requester_mirror.run_packet_capture() == False:\n logging.error(\"Failed to run packet capture on requester mirror\")\n return False\n\n if self.responder_mirror.run_packet_capture() == False:\n logging.error(\"Failed to run packet capture on responder mirror\")\n return False\n\n time.sleep(3)\n\n ## Dump the counters before running\n if self.requester.dump_counters(host.REQ_START_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on requester before running\")\n return False\n\n if self.responder.dump_counters(host.RSP_START_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on responder before running\")\n return False\n\n ## Launch RDMA server first\n run_server_ret = self.responder.run_traffic_gen_server(self.traffic_conf)\n if run_server_ret == False:\n logging.error(\"Failed to run RDMA server\")\n return False\n\n time.sleep(2)\n\n ## Launch RDMA client\n try:\n destination_ip_subnet = self.responder.conf['nic']['ip-list'][0]\n destination_ip = destination_ip_subnet.split('/')[0]\n except:\n logging.error(\"Failed to get destination IP\")\n return False\n\n run_client_ret = self.requester.run_traffic_gen_client(traffic_conf=self.traffic_conf,\n destination_ip=destination_ip,\n controller_ip=self.switch.conf['control-ip'],\n controller_listen_port=self.switch.conf['listen-port'])\n if run_client_ret == 
False:\n logging.error(\"Failed to run RDMA client\")\n return False\n\n if self.switch.dump_results() == False:\n logging.error(\"Failed to dump results from switch\")\n return False\n\n if self.requester.dump_counters(host.REQ_FINISH_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on requester after running\")\n return False\n\n if self.responder.dump_counters(host.RSP_FINISH_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on responder after running\")\n return False\n\n logging.info(\"Experiment completed successfully\")\n return True\n\n def clean_up(self):\n \"\"\" Clean up the environment after the experiment\n\n Returns:\n bool: True if the clean up is completed successfully, False otherwise\n \"\"\"\n logging.info(\"Start cleaning up the environment\")\n\n if self.switch.clean_up() == False:\n logging.error(\"Failed to clean up switch\")\n return False\n\n if self.requester.clean_up() == False:\n logging.error(\"Failed to clean up requester\")\n return False\n\n if self.responder.clean_up() == False:\n logging.error(\"Failed to clean up responder\")\n return False\n\n if self.requester_mirror.clean_up() == False:\n logging.error(\"Failed to clean up requester mirror\")\n return False\n\n if self.responder_mirror.clean_up() == False:\n logging.error(\"Failed to clean up responder mirror\")\n return False\n\n return True\n\n def fetch_results(self, iter_id=0):\n \"\"\" Fetch the results of iteration 'iter_id', including:\n 1. Switch table entries and counters\n 2. Packet trace (pcap file)\n 3. Configs and end-to-end results from RDMA hosts\n\n Args:\n iter_id (int, optional): iteration ID, defaults to 0\n\n Returns:\n bool: True if the result collection is completed successfully, False otherwise\n \"\"\"\n ## Make the results dir if it does not exist\n iter_result_path = os.path.join(self.result_path, str(iter_id))\n cmd = \"mkdir -p %s\" % iter_result_path\n try:\n subprocess.call(cmd, shell=True)\n except:\n logging.error(\"Failed to create result directory %s\" % iter_result_path)\n return False\n\n if self.switch.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from switch\")\n return False\n\n if self.requester_mirror.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from requester mirror\")\n return False\n\n if self.responder_mirror.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from responder mirror\")\n return False\n\n if self.requester.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from requester\")\n return False\n\n if self.responder.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from responder\")\n return False\n\n logging.info(\"Finished fetching results for iteration %d\" % iter_id)\n return True\n\n def merge_traces(self, iter_id=0):\n iter_pcap_dir_path = os.path.join(self.result_path, str(iter_id), host.PCAP_RESULT_DIR)\n src_pcap_file_list = [os.path.join(iter_pcap_dir_path,\n self.requester_mirror.conf['pkt-dump-conf']['dump-filename']),\n os.path.join(iter_pcap_dir_path,\n self.responder_mirror.conf['pkt-dump-conf']['dump-filename'])]\n target_pcap_path = os.path.join(self.result_path,\n str(iter_id),\n host.PCAP_RESULT_DIR,\n self.aggregate_pcap_filename)\n packet_list = pcap_process.merge_pcaps(src_pcap_file_list)\n if packet_list is None:\n logging.error(\"Failed to merge pcap files for iteration %d\" % iter_id)\n return False\n\n if 
pcap_process.dump_pkts_to_pcap(target_pcap_path, packet_list) == False:\n logging.error(\"Failed to dump packets to pcap file %s\" % target_pcap_path)\n return False\n\n logging.info(\"Successfully merged pcap files for iteration %d\" % iter_id)\n\n def check_integrity(self, iter_id=0):\n ## Check if the collected packet trace passes integrity check\n pcap_path = os.path.join(self.result_path,\n str(iter_id),\n host.PCAP_RESULT_DIR,\n self.aggregate_pcap_filename)\n packet_list = get_packet_list(pcap_path)\n packet_list.sort(key=lambda x:x.get_switch_seqnum())\n logging.info(\"Packet trace sorted by switch sequence number.\")\n\n switch_state_snapshot = os.path.join(self.result_path,\n str(iter_id),\n switch.SWITCH_RESULT_DIR,\n switch.SWITCH_STATE_SNAPSHOT)\n port_map = {'requester': self.requester.conf['nic']['switch-port'],\n 'responder': self.responder.conf['nic']['switch-port'],\n 'requester-mirror': self.requester_mirror.conf['nic']['switch-port'],\n 'responder-mirror': self.responder_mirror.conf['nic']['switch-port']}\n switch_counter = SwitchCounter(switch_state_snapshot, port_map)\n\n integrity_checker = IntegrityCheck(packet_list=packet_list,\n switch_counter=switch_counter,\n requester_ip_list=self.get_requester_ip_list(),\n responder_ip_list=self.get_responder_ip_list())\n\n if integrity_checker.check() == True:\n logging.info(\"Integrity check passed\")\n return True\n else:\n logging.info(\"Integrity check failed\")\n return False" }, { "identifier": "SwitchCounter", "path": "lumina/analyzer/counter/switch_counter.py", "snippet": "class SwitchCounter:\n \"\"\" Class to parse switch counter files\n\n Attributes:\n _counter (dict of dict): the switch counters with the following format:\n {'requester': {'ingress': counter_value, 'egress': counter_value},\n 'responder': {'ingress': counter_value, 'egress': counter_value},\n 'requester-mirror': {'ingress': counter_value, 'egress': counter_value},\n 'responder-mirror': {'ingress': counter_value, 'egress': counter_value}}\n \"\"\"\n def __init__(self, snapshot_filename, port_map):\n \"\"\" Constructor\n\n Args:\n snapshot_filename (str): the file where switch dumps its counters\n port_map (dict): the mapping between port name and port number\n\n Returns:\n N/A\n \"\"\"\n with open(snapshot_filename, \"r\") as stream:\n conf = yaml.safe_load(stream)\n try:\n ingress_counters = conf['counter']['ingress']\n egress_counters = conf['counter']['egress']\n except:\n print(\"Bad yaml format in %s\" % snapshot_filename)\n sys.exit(-1)\n\n requester_port = port_map['requester']\n responder_port = port_map['responder']\n requester_mirror_port = port_map['requester-mirror']\n responder_mirror_port = port_map['responder-mirror']\n\n self._counter = {'requester' : {'ingress':0, 'egress': 0},\n 'responder' : {'ingress':0, 'egress': 0},\n 'requester-mirror' : {'ingress':0, 'egress': 0},\n 'responder-mirror' : {'ingress':0, 'egress': 0}}\n try:\n self._counter['requester']['ingress'] = ingress_counters[requester_port]\n self._counter['responder']['ingress'] = ingress_counters[responder_port]\n self._counter['requester-mirror']['ingress'] = ingress_counters[requester_mirror_port]\n self._counter['responder-mirror']['ingress'] = ingress_counters[responder_mirror_port]\n\n self._counter['requester']['egress'] = egress_counters[requester_port]\n self._counter['responder']['egress'] = egress_counters[responder_port]\n self._counter['requester-mirror']['egress'] = egress_counters[requester_mirror_port]\n self._counter['responder-mirror']['egress'] = 
egress_counters[responder_mirror_port]\n\n except:\n print(\"Port number not exist in the switch snapshot\")\n sys.exit(-1)\n\n def get_counter(self):\n \"\"\" Return the switch counters (dict of dict) \"\"\"\n return self._counter" }, { "identifier": "MLNXHostCounter", "path": "lumina/analyzer/counter/host_counter.py", "snippet": "class MLNXHostCounter(HostCounter):\n \"\"\" Class to parse MLNX host counter files \"\"\"\n def __init__(self, counter_start_filename, counter_finish_filename):\n \"\"\" Constructor\n\n Args:\n counter_start_filename (str): the file where host dumps its counters at the start phase\n counter_finish_filename (str): the file where host dumps its counters at the finish phase\n\n Returns:\n N/A\n \"\"\"\n super().__init__(counter_start_filename, counter_finish_filename)\n\n def get_port_rcv_packets(self):\n \"\"\" Return the number of received packets \"\"\"\n return self._counter['port-counters']['port_rcv_packets']\n\n def get_port_xmit_packets(self):\n \"\"\" Return the number of transmitted packets \"\"\"\n return self._counter['port-counters']['port_xmit_packets']\n\n def get_num_packet_seq_err(self):\n \"\"\" Return the number of received NAK sequence error packets \"\"\"\n return self._counter['hw-counters']['packet_seq_err']\n\n def get_num_out_of_sequence(self):\n \"\"\" Return the number of out-of-sequence packets received \"\"\"\n return self._counter['hw-counters']['out_of_sequence']\n\n def get_num_dup_requests(self):\n \"\"\" Return the number of duplicate requests \"\"\"\n return self._counter['hw-counters']['duplicate_request']\n\n def implied_nak_seq_err(self):\n \"\"\" Return the number of READ requests implying sequence errors \"\"\"\n return self._counter['hw-counters']['implied_nak_seq_err']\n\n def get_num_cnp_sent(self):\n \"\"\" Return the number of congestion notification packets sent by notification point \"\"\"\n return self._counter['hw-counters']['np_cnp_sent']\n\n def get_num_ecn_marked_packets(self):\n \"\"\" Return the number of ECN marked RoCEv2 packets received by notification point \"\"\"\n return self._counter['hw-counters']['np_ecn_marked_roce_packets']\n\n def get_num_cnp_handled(self):\n \"\"\" Return the number of congestion notification packets handled by reaction point \"\"\"\n return self._counter['hw-counters']['rp_cnp_handled']\n\n def get_num_icrc_errors(self):\n \"\"\" Return the number of RoCE packets with ICRC errors received \"\"\"\n return self._counter['hw-counters']['rx_icrc_encapsulated']\n\n def get_num_timeout_err(self):\n \"\"\" Return the number of times QP's ack timer expired for RC, XRC, DCT QPs at the sender side \"\"\"\n return self._counter['hw-counters']['local_ack_timeout_err']\n\n def get_num_discards_dict_tx(self):\n \"\"\" Return the number of TX discarded packets (dict)\"\"\"\n discards_dict_tx = {}\n for x in self._counter['ethtool-counters'].keys():\n if 'discard' in x and 'tx' in x:\n discards_dict_tx[x] = self._counter['ethtool-counters'][x]\n return discards_dict_tx\n\n def get_num_discards_dict_rx(self):\n \"\"\" Return the number of RX discarded packets (dict) \"\"\"\n discards_dict_rx = {}\n for x in self._counter['ethtool-counters'].keys():\n if 'discard' in x and 'rx' in x:\n discards_dict_rx[x] = self._counter['ethtool-counters'][x]\n return discards_dict_rx" }, { "identifier": "IntelHostCounter", "path": "lumina/analyzer/counter/host_counter.py", "snippet": "class IntelHostCounter(HostCounter):\n \"\"\" Class to parse Intel host counter files \"\"\"\n def __init__(self, 
counter_start_filename, counter_finish_filename):\n \"\"\" Constructor\n\n Args:\n counter_start_filename (str): the file where host dumps its counters at the start phase\n counter_finish_filename (str): the file where host dumps its counters at the finish phase\n\n Returns:\n N/A\n \"\"\"\n super().__init__(counter_start_filename, counter_finish_filename)\n\n def get_num_cnp_sent(self):\n \"\"\" Return the number of congestion notification packets sent by notification point \"\"\"\n return self._counter['hw-counters']['cnpSent']\n\n def get_num_ecn_marked_packets(self):\n \"\"\" Return the number of ECN marked RoCEv2 packets received by notification point \"\"\"\n return self._counter['hw-counters']['RxECNMrkd']\n\n def get_num_cnp_handled(self):\n \"\"\" Return the number of congestion notification packets handled by reaction point \"\"\"\n return self._counter['hw-counters']['cnpHandled']\n\n def get_num_discards_dict(self):\n \"\"\" Return the number of discarded packets (dict) \"\"\"\n discards_dict= {}\n for x in self._counter['hw-counters'].keys():\n if 'discard' in x:\n discards_dict[x] = self._counter['hw-counters'][x]\n return discards_dict" }, { "identifier": "get_packet_list", "path": "lumina/analyzer/pcap_processor/pcap_process.py", "snippet": "def get_packet_list(pcap_file):\n \"\"\" Read a pcap file and return a list of packets\n\n Args:\n pcap_file (str): The pcap file to read\n\n Returns:\n list: The list of packets if successful, empty list otherwise\n\n Raises:\n IOError: If the pcap file cannot be opened for reading\n Exception: If the pcap file cannot be read\n \"\"\"\n packet_list = []\n try:\n with open(pcap_file, 'rb') as file_read:\n pcap = dpkt.pcap.Reader(file_read)\n for packet in pcap:\n packet_list.append(roce_packet.RRoCEPacket(packet))\n except IOError:\n logging.error(\"Unable to open pcap file %s. Please check your filename.\" % pcap_file)\n raise IOError\n\n except:\n logging.error(\"Failed to read pcap file %s.\" % pcap_file)\n raise Exception\n\n logging.info(\"Successfully read %d packets from %s.\" % (len(packet_list), pcap_file))\n return packet_list" }, { "identifier": "config_stream_handler", "path": "lumina/utils/config_loggers.py", "snippet": "def config_stream_handler(logger):\n \"\"\" Configure stream handler\n\n Args:\n logger (logging.Logger): Logger object\n\n Returns:\n N/A\n \"\"\"\n logger.setLevel(logging.INFO)\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n console.setFormatter(logging.Formatter('%(name)-18s: %(levelname)-8s %(message)s'))\n logger.addHandler(console)" }, { "identifier": "config_file_handler", "path": "lumina/utils/config_loggers.py", "snippet": "def config_file_handler(logger, log_file, no_format=False):\n \"\"\" Configure file handler\n\n Args:\n logger (logging.Logger): Logger object\n log_file (str): Log file path\n no_format (bool): If True, do not format log messages (default: False)\n\n Returns:\n N/A\n \"\"\"\n logger.setLevel(logging.INFO)\n file_handler = logging.FileHandler(log_file, mode=\"w\")\n if no_format == False:\n file_handler.setFormatter(logging.Formatter('%(name)-18s: %(levelname)-8s %(message)s'))\n file_handler.setLevel(logging.INFO)\n logger.addHandler(file_handler)" } ]
import argparse, os, glob, logging, time
import lumina.analyzer.checker.integrity_check as integrity_check
import lumina.analyzer.checker.host_check as host_check
import lumina.analyzer.checker.cnp_check as cnp_check
import lumina.orchestrator.host as host
import lumina.orchestrator.switch as switch
from lumina.analyzer.main import get_qp_info_list, get_packet_list
from lumina.orchestrator.main import Orchestrator
from lumina.analyzer.counter.switch_counter import SwitchCounter
from lumina.analyzer.counter.host_counter import MLNXHostCounter, IntelHostCounter
from lumina.analyzer.pcap_processor.pcap_process import get_packet_list
from lumina.utils.config_loggers import config_stream_handler, config_file_handler
8,032
## All logs will be logged into file LOG_FILENAME
LOG_FILENAME = "test_cnp.log"
## Results (checkers and measurements) will also be dumped into file RESULT_FILENAME
RESULT_FILENAME = "result.log"
## Max # of retries for each experiment iteration
MAX_NB_EXP_RETRIES = 3


def setup_root_logger(orchestrator):
    """ Setup the root logger

    Args:
        orchestrator (Orchestrator object): Orchestrator object that contains all the configurations
    """
    root_logger = logging.getLogger()
    root_logger.handlers.clear()
    config_stream_handler(root_logger)
## All logs will be logged into file LOG_FILENAME
LOG_FILENAME = "test_cnp.log"
## Results (checkers and measurements) will also be dumped into file RESULT_FILENAME
RESULT_FILENAME = "result.log"
## Max # of retries for each experiment iteration
MAX_NB_EXP_RETRIES = 3


def setup_root_logger(orchestrator):
    """ Setup the root logger

    Args:
        orchestrator (Orchestrator object): Orchestrator object that contains all the configurations
    """
    root_logger = logging.getLogger()
    root_logger.handlers.clear()
    config_stream_handler(root_logger)
config_file_handler(logger=root_logger,
7
2023-12-09 08:21:14+00:00
12k
Tlntin/booking_simulator
apps/agentfabric/user_core.py
[ { "identifier": "parse_configuration", "path": "config_utils.py", "snippet": "def parse_configuration(uuid_str=''):\n \"\"\"parse configuration\n\n Args:\n\n Returns:\n dict: parsed configuration\n\n \"\"\"\n model_cfg_file = os.getenv('MODEL_CONFIG_FILE', DEFAULT_MODEL_CONFIG_FILE)\n\n builder_cfg_file = get_user_cfg_file(uuid_str)\n # use default if not exists\n if not os.path.exists(builder_cfg_file):\n # create parents directory\n os.makedirs(os.path.dirname(builder_cfg_file), exist_ok=True)\n # copy the template to the address\n builder_cfg_file_temp = './config/builder_config.json'\n\n if builder_cfg_file_temp != builder_cfg_file:\n shutil.copy(builder_cfg_file_temp, builder_cfg_file)\n\n tool_cfg_file = os.getenv('TOOL_CONFIG_FILE', DEFAULT_TOOL_CONFIG_FILE)\n\n builder_cfg = Config.from_file(builder_cfg_file)\n model_cfg = Config.from_file(model_cfg_file)\n tool_cfg = Config.from_file(tool_cfg_file)\n\n tools_info = builder_cfg.tools\n available_tool_list = []\n for key, value in tools_info.items():\n if value['use']:\n available_tool_list.append(key)\n tool_cfg[key]['use'] = value['use']\n\n openapi_plugin_file = get_user_openapi_plugin_cfg_file(uuid_str)\n plugin_cfg = {}\n available_plugin_list = []\n if os.path.exists(openapi_plugin_file):\n openapi_plugin_cfg = Config.from_file(openapi_plugin_file)\n try:\n config_dict = openapi_schema_convert(\n schema=openapi_plugin_cfg.schema,\n auth=openapi_plugin_cfg.auth.to_dict())\n plugin_cfg = Config(config_dict)\n for name, config in config_dict.items():\n available_plugin_list.append(name)\n except Exception as e:\n error = traceback.format_exc()\n print(f'Error:{e}, with detail: {error}')\n print(\n 'Error:FormatError, with detail: The format of the plugin config file is incorrect.'\n )\n\n return builder_cfg, model_cfg, tool_cfg, available_tool_list, plugin_cfg, available_plugin_list" }, { "identifier": "DEFAULT_EXEC_TEMPLATE", "path": "custom_prompt.py", "snippet": "DEFAULT_EXEC_TEMPLATE = \"\"\"Observation: <result><exec_result></result>\\nAnswer:\"\"\"" }, { "identifier": "DEFAULT_SYSTEM_TEMPLATE", "path": "custom_prompt.py", "snippet": "DEFAULT_SYSTEM_TEMPLATE = \"\"\"\n\n# 工具\n\n## 你拥有如下工具:\n\n<tool_list>\n\n## 当你需要调用工具时,请在你的回复中穿插如下的工具调用命令,可以根据需求调用零次或多次:\n## 即将调用工具时,只说即将调用[工具名],不要说其他多余的话。\n## 你只能从用户输入中获取关键信息,不能自己生成,所有获得的关键信息必须要参考用户输入作为依据。\n\n工具调用\nAction: 工具的名称,必须是<tool_name_list>之一\nAction Input: 工具的输入\nObservation: <result>工具返回的结果</result>\nAnswer: 根据Observation总结本次工具调用返回的结果,如果结果中出现url,请不要展示出。\n\n```\n[链接](url)\n```\n\n# 指令\n\"\"\"" }, { "identifier": "DEFAULT_USER_TEMPLATE", "path": "custom_prompt.py", "snippet": "DEFAULT_USER_TEMPLATE = \"\"\"(你正在扮演<role_name>,你可以使用工具:<tool_name_list><knowledge_note>)<file_names><user_input>\"\"\"" }, { "identifier": "CustomPromptGenerator", "path": "custom_prompt.py", "snippet": "class CustomPromptGenerator(PromptGenerator):\n\n def __init__(self,\n system_template=DEFAULT_SYSTEM_TEMPLATE,\n instruction_template=DEFAULT_INSTRUCTION_TEMPLATE,\n user_template=DEFAULT_USER_TEMPLATE,\n exec_template=DEFAULT_EXEC_TEMPLATE,\n assistant_template='',\n sep='\\n\\n',\n llm=None,\n length_constraint=LengthConstraint(),\n **kwargs):\n super().__init__(\n system_template=system_template,\n instruction_template=instruction_template,\n user_template=user_template,\n exec_template=exec_template,\n assistant_template=assistant_template,\n sep=sep,\n llm=llm,\n length_constraint=length_constraint)\n # hack here for special prompt, such as add an addition round before user input\n 
self.add_addition_round = kwargs.get('add_addition_round', False)\n self.addition_assistant_reply = kwargs.get('addition_assistant_reply',\n '')\n builder_cfg_file = get_user_cfg_file(\n uuid_str=kwargs.get('uuid_str', ''))\n builder_cfg = Config.from_file(builder_cfg_file)\n self.builder_cfg = builder_cfg\n self.knowledge_file_name = kwargs.get('knowledge_file_name', '')\n\n self.llm = llm\n self.prompt_preprocessor = build_raw_prompt(llm.model_id)\n self.length_constraint = length_constraint\n self._parse_length_restriction()\n\n def _parse_length_restriction(self):\n constraint = self.llm.cfg.get('length_constraint', None)\n # if isinstance(constraint, Config):\n # constraint = constraint.to_dict()\n self.length_constraint.update(constraint)\n\n def _update_user_prompt_without_knowledge(self, task, tool_list, **kwargs):\n if len(tool_list) > 0:\n # user input\n user_input = self.user_template.replace('<role_name>',\n self.builder_cfg.name)\n user_input = user_input.replace(\n '<tool_name_list>',\n ','.join([tool.name for tool in tool_list]))\n else:\n self.user_template = DEFAULT_USER_TEMPLATE_WITHOUT_TOOL\n user_input = self.user_template.replace('<user_input>', task)\n user_input = user_input.replace('<role_name>',\n self.builder_cfg.name)\n\n user_input = user_input.replace('<user_input>', task)\n\n if 'append_files' in kwargs:\n append_files = kwargs.get('append_files', [])\n if len(append_files) > 0:\n file_names = ','.join(\n [os.path.basename(path) for path in append_files])\n user_input = user_input.replace('<file_names>',\n f'[上传文件{file_names}]')\n else:\n user_input = user_input.replace('<file_names>', '')\n else:\n user_input = user_input.replace('<file_names>', '')\n\n return user_input\n\n def init_prompt(self, task, tool_list, knowledge_list, **kwargs):\n\n if len(self.history) == 0:\n\n self.history.append({\n 'role': 'system',\n 'content': 'You are a helpful assistant.'\n })\n\n if len(tool_list) > 0:\n prompt = f'{self.system_template}\\n{self.instruction_template}'\n\n # get tool description str\n tool_str = self.get_tool_str(tool_list)\n prompt = prompt.replace('<tool_list>', tool_str)\n\n tool_name_str = self.get_tool_name_str(tool_list)\n prompt = prompt.replace('<tool_name_list>', tool_name_str)\n else:\n self.system_template = DEFAULT_SYSTEM_TEMPLATE_WITHOUT_TOOL\n prompt = f'{self.system_template}\\n{self.instruction_template}'\n\n user_input = self._update_user_prompt_without_knowledge(\n task, tool_list, **kwargs)\n\n if len(knowledge_list) > 0:\n user_input = user_input.replace('<knowledge_note>',\n ',请查看前面的知识库')\n else:\n user_input = user_input.replace('<knowledge_note>', '')\n\n self.system_prompt = copy.deepcopy(prompt)\n\n # build history\n if self.add_addition_round:\n self.history.append({\n 'role': 'user',\n 'content': self.system_prompt\n })\n self.history.append({\n 'role': 'assistant',\n 'content': self.addition_assistant_reply\n })\n self.history.append({'role': 'user', 'content': user_input})\n self.history.append({\n 'role': 'assistant',\n 'content': self.assistant_template\n })\n else:\n self.history.append({\n 'role': 'user',\n 'content': self.system_prompt + user_input\n })\n self.history.append({\n 'role': 'assistant',\n 'content': self.assistant_template\n })\n\n self.function_calls = self.get_function_list(tool_list)\n else:\n user_input = self._update_user_prompt_without_knowledge(\n task, tool_list, **kwargs)\n if len(knowledge_list) > 0:\n user_input = user_input.replace('<knowledge_note>',\n ',请查看前面的知识库')\n else:\n user_input = 
user_input.replace('<knowledge_note>', '')\n\n self.history.append({'role': 'user', 'content': user_input})\n self.history.append({\n 'role': 'assistant',\n 'content': self.assistant_template\n })\n\n if len(knowledge_list) > 0:\n knowledge_str = self.get_knowledge_str(\n knowledge_list,\n file_name=self.knowledge_file_name,\n only_content=True)\n self.update_knowledge_str(knowledge_str)\n\n def update_knowledge_str(self, knowledge_str):\n \"\"\"If knowledge base information was not used previously, it will be added;\n if knowledge base information was previously used, it will be replaced.\n\n Args:\n knowledge_str (str): knowledge str generated by get_knowledge_str\n \"\"\"\n knowledge_introduction = KNOWLEDGE_INTRODUCTION_PROMPT.replace(\n '<file_name>', self.knowledge_file_name)\n if len(knowledge_str) > self.length_constraint.knowledge:\n # todo: use tokenizer to constrain length\n knowledge_str = knowledge_str[-self.length_constraint.knowledge:]\n knowledge_str = f'{KNOWLEDGE_PROMPT}{self.sep}{knowledge_introduction}{self.sep}{knowledge_str}'\n\n for i in range(0, len(self.history)):\n if self.history[i]['role'] == 'user':\n content: str = self.history[i]['content']\n start_pos = content.find(f'{KNOWLEDGE_PROMPT}{self.sep}')\n end_pos = content.rfind('\\n\\n# 工具\\n\\n')\n if start_pos >= 0 and end_pos >= 0: # replace knowledge\n\n self.history[i]['content'] = content[\n 0:start_pos] + knowledge_str + content[end_pos:]\n break\n elif start_pos < 0 and end_pos == 0: # add knowledge\n self.history[i]['content'] = knowledge_str + content\n break\n else:\n continue\n\n def get_tool_str(self, tool_list):\n tool_texts = []\n for tool in tool_list:\n tool_texts.append(\n TOOL_DESC.format(\n name_for_model=tool.name,\n name_for_human=tool.name,\n description_for_model=tool.description,\n parameters=json.dumps(tool.parameters,\n ensure_ascii=False)))\n # + ' ' + FORMAT_DESC['json'])\n tool_str = '\\n\\n'.join(tool_texts)\n return tool_str\n\n def get_tool_name_str(self, tool_list):\n tool_name = []\n for tool in tool_list:\n tool_name.append(tool.name)\n\n tool_name_str = json.dumps(tool_name, ensure_ascii=False)\n return tool_name_str\n\n def _generate(self, llm_result, exec_result: str):\n \"\"\"\n generate next round prompt based on previous llm_result and exec_result and update history\n \"\"\"\n if len(llm_result) != 0:\n self.history[-1]['content'] += f'{llm_result}'\n if len(exec_result) != 0:\n # handle image markdown wrapper\n image_markdown_re = re.compile(\n pattern=r'!\\[IMAGEGEN\\]\\(([\\s\\S]+)\\)')\n match = image_markdown_re.search(exec_result)\n if match is not None:\n exec_result = match.group(1).rstrip()\n exec_result = self.exec_template.replace('<exec_result>',\n str(exec_result))\n self.history[-1]['content'] += exec_result\n\n # generate plate prompt here\n self.prompt = self.prompt_preprocessor(self.history)\n return self.prompt" }, { "identifier": "parse_role_config", "path": "custom_prompt.py", "snippet": "def parse_role_config(config: dict):\n prompt = '你扮演AI-Agent,'\n\n # concat prompt\n if 'name' in config and config['name']:\n prompt += ('你的名字是' + config['name'] + '。')\n if 'description' in config and config['description']:\n prompt += config['description']\n prompt += '\\n你具有下列具体功能:'\n if 'instruction' in config and config['instruction']:\n if isinstance(config['instruction'], list):\n for ins in config['instruction']:\n prompt += ins\n prompt += ';'\n elif isinstance(config['instruction'], str):\n prompt += config['instruction']\n if prompt[-1] == ';':\n prompt = 
prompt[:-1]\n prompt += '\\n下面你将开始扮演'\n eastern_eight_zone = pytz.timezone('Asia/Shanghai')\n\n # 获取东八区的当前时间\n eastern_time = datetime.now(eastern_eight_zone)\n # 格式化时间\n formatted_time = eastern_time.strftime(\"%Y-%m-%d %H:%M\")\n formatted_weekday = eastern_time.weekday()\n temp_list = [\"一\", \"二\", \"三\", \"四\", \"五\", \"六\", \"日\"]\n formatted_weekday = temp_list[formatted_weekday]\n prompt += f\"\\n当前时间是:{formatted_time},星期{formatted_weekday}。\"\n prompt += \"你的数学很强,计算相对日期对你来说轻而易举。\"\n if 'name' in config and config['name']:\n prompt += config['name']\n prompt += ',明白了请说“好的。”,不要说其他的。'\n return prompt" }, { "identifier": "AgentExecutor", "path": "modelscope_agent/agent.py", "snippet": "class AgentExecutor:\n\n def __init__(self,\n llm: LLM,\n tool_cfg: Optional[Dict] = {},\n agent_type: AgentType = AgentType.DEFAULT,\n additional_tool_list: Optional[Dict] = {},\n prompt_generator: Optional[PromptGenerator] = None,\n output_parser: Optional[OutputParser] = None,\n tool_retrieval: Optional[Union[bool, ToolRetrieval]] = True,\n knowledge_retrieval: Optional[KnowledgeRetrieval] = None):\n \"\"\"\n the core class of ms agent. It is responsible for the interaction between user, llm and tools,\n and return the execution result to user.\n\n Args:\n llm (LLM): llm model, can be load from local or a remote server.\n tool_cfg (Optional[Dict]): cfg of default tools\n agent_type (AgentType, optional): agent type. Defaults to AgentType.DEFAULT, decide which type of agent\n reasoning type to use\n additional_tool_list (Optional[Dict], optional): user-defined additional tool list. Defaults to {}.\n prompt_generator (Optional[PromptGenerator], optional): this module is responsible for generating prompt\n according to interaction result. Defaults to use MSPromptGenerator.\n output_parser (Optional[OutputParser], optional): this module is responsible for parsing output of llm\n to executable actions. Defaults to use MsOutputParser.\n tool_retrieval (Optional[Union[bool, ToolRetrieval]], optional): Retrieve related tools by input task,\n since most of the tools may be useless for LLM in specific task.\n If it is bool type and is True, will use default tool_retrieval. Defaults to True.\n knowledge_retrieval (Optional[KnowledgeRetrieval], optional): If user want to use extra knowledge,\n this component can be used to retrieve related knowledge. Defaults to None.\n \"\"\"\n\n self.llm = llm\n\n self.agent_type = agent_type\n self.llm.set_agent_type(agent_type)\n self.prompt_generator = prompt_generator or get_prompt_generator(\n agent_type)\n self.output_parser = output_parser or get_output_parser(agent_type)\n\n self._init_tools(tool_cfg, additional_tool_list)\n\n if isinstance(tool_retrieval, bool) and tool_retrieval:\n tool_retrieval = ToolRetrieval()\n self.tool_retrieval = tool_retrieval\n if self.tool_retrieval:\n self.tool_retrieval.construct(\n [str(t) for t in self.tool_list.values()])\n self.knowledge_retrieval = knowledge_retrieval\n self.reset()\n self.seed = None\n\n def _init_tools(self,\n tool_cfg: Dict = {},\n additional_tool_list: Dict = {}):\n \"\"\"init tool list of agent. We provide a default tool list, which is initialized by a cfg file.\n user can also provide user-defined tools by additional_tool_list.\n The key of additional_tool_list is tool name, and the value is corresponding object.\n\n Args:\n tool_cfg (Dict): default tool cfg.\n additional_tool_list (Dict, optional): user-defined tools. 
Defaults to {}.\n \"\"\"\n self.tool_list = {}\n tool_info_list = {**TOOL_INFO_LIST, **additional_tool_list}\n tools_module = importlib.import_module('modelscope_agent.tools')\n for tool_name in tool_cfg.keys():\n if tool_cfg[tool_name].get('use', False):\n assert tool_name in tool_info_list, f'Invalid tool name: {tool_name}, ' \\\n f'available ones are: {tool_info_list.keys()}'\n tool_class_name = tool_info_list[tool_name]\n tool_class = getattr(tools_module, tool_class_name)\n tool_name = tool_class.name\n self.tool_list[tool_name] = tool_class(tool_cfg)\n\n self.tool_list = {**self.tool_list, **additional_tool_list}\n # self.available_tool_list = deepcopy(self.tool_list)\n self.set_available_tools(self.tool_list.keys())\n\n def set_available_tools(self, available_tool_list):\n # TODO @wenmeng.zwm refine tool init\n for t in available_tool_list:\n if t not in self.tool_list:\n raise ValueError(\n f'Unsupported tools found:{t}, please check, valid ones: {self.tool_list.keys()}'\n )\n\n self.available_tool_list = {\n k: self.tool_list[k]\n for k in available_tool_list\n }\n\n def retrieve_tools(self, query: str) -> List[str]:\n \"\"\"retrieve tools given query\n\n Args:\n query (str): query\n\n \"\"\"\n if self.tool_retrieval:\n retrieve_tools = self.tool_retrieval.retrieve(query)\n self.set_available_tools(available_tool_list=retrieve_tools.keys())\n return self.available_tool_list.values()\n\n def get_knowledge(self, query: str) -> List[str]:\n \"\"\"retrieve knowledge given query\n\n Args:\n query (str): query\n\n \"\"\"\n return self.knowledge_retrieval.retrieve(\n query) if self.knowledge_retrieval else []\n\n def run(self,\n task: str,\n remote: bool = False,\n print_info: bool = False,\n append_files: list = []) -> List[Dict]:\n \"\"\" use llm and tools to execute task given by user\n\n Args:\n task (str): concrete task\n remote (bool, optional): whether to execute tool in remote mode. Defaults to False.\n print_info (bool, optional): whether to print prompt info. Defaults to False.\n\n Returns:\n List[Dict]: execute result. One task may need to interact with llm multiple times,\n so a list of dict is returned. 
Each dict contains the result of one interaction.\n \"\"\"\n\n # retrieve tools\n tool_list = self.retrieve_tools(task)\n knowledge_list = self.get_knowledge(task)\n\n self.prompt_generator.init_prompt(\n task, tool_list, knowledge_list, append_files=append_files)\n function_list = self.prompt_generator.get_function_list(tool_list)\n\n llm_result, exec_result = '', ''\n\n idx = 0\n final_res = []\n\n while True:\n idx += 1\n\n # generate prompt and call llm\n llm_artifacts = self.prompt_generator.generate(\n llm_result, exec_result)\n try:\n llm_result = self.llm.generate(llm_artifacts, function_list)\n except RuntimeError as e:\n return [{'exec_result': str(e)}]\n\n if print_info:\n print(f'|LLM inputs in round {idx}: {llm_artifacts}')\n\n # parse and get tool name and arguments\n try:\n action, action_args = self.output_parser.parse_response(\n llm_result)\n except ValueError as e:\n return [{'exec_result': f'{e}'}]\n\n if action is None:\n # in chat mode, the final result of last instructions should be updated to prompt history\n _ = self.prompt_generator.generate(llm_result, '')\n\n # for summarize\n display(llm_result, {}, idx, self.agent_type)\n return final_res\n\n if action in self.available_tool_list:\n action_args = self.parse_action_args(action_args)\n tool = self.tool_list[action]\n\n # TODO @wenmeng.zwm remove this hack logic for image generation\n if action == 'image_gen' and self.seed:\n action_args['seed'] = self.seed\n try:\n exec_result = tool(**action_args, remote=remote)\n if print_info:\n print(f'|exec_result: {exec_result}')\n\n # parse exec result and store result to agent state\n final_res.append(exec_result)\n self.parse_exec_result(exec_result)\n except Exception as e:\n exec_result = f'Action call error: {action}: {action_args}. \\n Error message: {e}'\n return [{'exec_result': exec_result}]\n else:\n exec_result = f\"Unknown action: '{action}'. \"\n return [{'exec_result': exec_result}]\n\n # display result\n display(llm_result, exec_result, idx, self.agent_type)\n\n def stream_run(self,\n uuid_str: str,\n task: str,\n remote: bool = True,\n print_info: bool = False,\n append_files: list = []) -> Dict:\n \"\"\"this is a stream version of run, which can be used in scenario like gradio.\n It will yield the result of each interaction, so that the caller can display the result\n\n Args:\n uuid_str: str,\n task (str): concrete task\n remote (bool, optional): whether to execute tool in remote mode. Defaults to True.\n print_info (bool, optional): whether to print prompt info. 
Defaults to False.\n files that individually used in each run, no need to record to global state\n\n Yields:\n Iterator[Dict]: iterator of llm response and tool execution result\n \"\"\"\n\n # retrieve tools\n tool_list = self.retrieve_tools(task)\n knowledge_list = self.get_knowledge(task)\n\n self.prompt_generator.init_prompt(\n task,\n tool_list,\n knowledge_list,\n append_files=append_files,\n )\n function_list = self.prompt_generator.get_function_list(tool_list)\n\n llm_result, exec_result = '', ''\n\n idx = 0\n\n while True:\n idx += 1\n llm_artifacts = self.prompt_generator.generate(\n llm_result, exec_result)\n if print_info:\n print(f'|LLM inputs in round {idx}:\\n{llm_artifacts}')\n\n llm_result = ''\n try:\n for s in self.llm.stream_generate(llm_artifacts,\n function_list):\n llm_result += s\n yield {'llm_text': s}\n except RuntimeError:\n s = self.llm.generate(llm_artifacts)\n llm_result += s\n yield {'llm_text': s}\n except Exception as e:\n yield {'llm_text': str(e)}\n\n # parse and get tool name and arguments\n try:\n action, action_args = self.output_parser.parse_response(\n llm_result)\n except ValueError as e:\n yield {'exec_result': f'{e}'}\n return\n\n if action is None:\n # in chat mode, the final result of last instructions should be updated to prompt history\n _ = self.prompt_generator.generate(llm_result, '')\n yield {'is_final': True}\n return\n\n if action in self.available_tool_list:\n # yield observation to as end of action input symbol asap\n yield {'llm_text': 'Observation: '}\n action_args = self.parse_action_args(action_args)\n tool = self.tool_list[action]\n\n action_args[\"uuid_str\"] = uuid_str\n # TODO @wenmeng.zwm remove this hack logic for image generation\n if action == 'image_gen' and self.seed:\n action_args['seed'] = self.seed\n try:\n exec_result = tool(**action_args, remote=remote)\n yield {'exec_result': exec_result}\n\n # parse exec result and update state\n self.parse_exec_result(exec_result)\n except Exception as e:\n exec_result = f'Action call error: {action}: {action_args}. \\n Error message: {e}'\n yield {'exec_result': exec_result}\n self.prompt_generator.reset()\n return\n else:\n exec_result = f\"Unknown action: '{action}'. 
\"\n yield {'exec_result': exec_result}\n self.prompt_generator.reset()\n return\n\n def reset(self):\n \"\"\"\n clear history and agent state\n \"\"\"\n self.prompt_generator.reset()\n self.agent_state = {}\n\n def parse_action_args(self, action_args):\n \"\"\"\n replace action_args in str to Image/Video/Audio Wrapper, so that tool can handle them\n \"\"\"\n parsed_action_args = {}\n for name, arg in action_args.items():\n try:\n true_arg = self.agent_state.get(arg, arg)\n except Exception as e:\n print(f'Error when parsing action args: {e}, using fall back')\n true_arg = arg\n parsed_action_args[name] = true_arg\n return parsed_action_args\n\n def parse_exec_result(self, exec_result, *args, **kwargs):\n \"\"\"\n update exec result to agent state.\n key is the str representation of the result.\n \"\"\"\n for k, v in exec_result.items():\n self.agent_state[str(v)] = v" }, { "identifier": "AgentType", "path": "modelscope_agent/agent_types.py", "snippet": "class AgentType(str, Enum):\n\n DEFAULT = 'default'\n \"\"\"\"\"\"\n\n MS_AGENT = 'ms-agent'\n \"\"\"An agent that uses the ModelScope-agent specific format does a reasoning step before acting .\n \"\"\"\n\n MRKL = 'mrkl'\n \"\"\"An agent that does a reasoning step before acting with mrkl\"\"\"\n\n REACT = 'react'\n \"\"\"An agent that does a reasoning step before acting with react\"\"\"\n\n Messages = 'messages'\n \"\"\"An agent optimized for using open AI functions.\"\"\"" }, { "identifier": "LLMFactory", "path": "modelscope_agent/llm/llm_factory.py", "snippet": "class LLMFactory:\n\n @staticmethod\n def build_llm(model_name, cfg):\n llm_type = cfg[model_name].pop('type')\n llm_cls = get_llm_cls(llm_type, model_name)\n llm_cfg = cfg[model_name]\n return llm_cls(cfg=llm_cfg)" }, { "identifier": "KnowledgeRetrieval", "path": "modelscope_agent/retrieve.py", "snippet": "class KnowledgeRetrieval(Retrieval):\n\n def __init__(self,\n docs,\n embedding: Embeddings = None,\n vs_cls: VectorStore = None,\n top_k: int = 5,\n vs_params: Dict = {}):\n super().__init__(embedding, vs_cls, top_k, vs_params)\n self.construct(docs)\n\n @classmethod\n def from_file(cls,\n file_path: Union[str, list],\n embedding: Embeddings = None,\n vs_cls: VectorStore = None,\n top_k: int = 5,\n vs_params: Dict = {}):\n\n textsplitter = CharacterTextSplitter()\n all_files = []\n if isinstance(file_path, str) and os.path.isfile(file_path):\n all_files.append(file_path)\n elif isinstance(file_path, list):\n all_files = file_path\n elif os.path.isdir(file_path):\n for root, dirs, files in os.walk(file_path):\n for f in files:\n all_files.append(os.path.join(root, f))\n else:\n raise ValueError('file_path must be a file or a directory')\n\n docs = []\n for f in all_files:\n if f.lower().endswith('.txt'):\n loader = TextLoader(f, autodetect_encoding=True)\n docs += (loader.load_and_split(textsplitter))\n elif f.lower().endswith('.md'):\n loader = UnstructuredFileLoader(f, mode='elements')\n docs += loader.load()\n elif f.lower().endswith('.pdf'):\n loader = PyPDFLoader(f)\n docs += (loader.load_and_split(textsplitter))\n else:\n print(f'not support file type: {f}, will be support soon')\n\n if len(docs) == 0:\n return None\n else:\n return cls(docs, embedding, vs_cls, top_k, vs_params)" }, { "identifier": "OpenAPIPluginTool", "path": "modelscope_agent/tools/openapi_plugin.py", "snippet": "class OpenAPIPluginTool(Tool):\n \"\"\"\n openapi schema tool\n \"\"\"\n name: str = 'api tool'\n description: str = 'This is a api tool that ...'\n parameters: list = []\n\n def 
__init__(self, cfg, name):\n self.name = name\n self.cfg = cfg.get(self.name, {})\n self.is_remote_tool = self.cfg.get('is_remote_tool', False)\n # remote call\n self.url = self.cfg.get('url', '')\n self.token = self.cfg.get('token', '')\n self.header = self.cfg.get('header', '')\n self.method = self.cfg.get('method', '')\n self.parameters = self.cfg.get('parameters', [])\n self.description = self.cfg.get('description',\n 'This is a api tool that ...')\n self.responses_param = self.cfg.get('responses_param', [])\n try:\n all_para = {\n 'name': self.name,\n 'description': self.description,\n 'parameters': self.parameters\n }\n self.tool_schema = ToolSchema(**all_para)\n except ValidationError:\n raise ValueError(f'Error when parsing parameters of {self.name}')\n self._str = self.tool_schema.model_dump_json()\n self._function = self.parse_pydantic_model_to_openai_function(all_para)\n\n def _remote_call(self, *args, **kwargs):\n if self.url == '':\n raise ValueError(\n f\"Could not use remote call for {self.name} since this tool doesn't have a remote endpoint\"\n )\n\n remote_parsed_input = json.dumps(\n self._remote_parse_input(*args, **kwargs))\n origin_result = None\n if self.method == 'POST':\n retry_times = MAX_RETRY_TIMES\n while retry_times:\n retry_times -= 1\n try:\n print(f'data: {kwargs}')\n print(f'header: {self.header}')\n response = requests.request(\n 'POST',\n url=self.url,\n headers=self.header,\n data=remote_parsed_input)\n\n if response.status_code != requests.codes.ok:\n response.raise_for_status()\n origin_result = json.loads(\n response.content.decode('utf-8'))\n\n final_result = self._parse_output(\n origin_result, remote=True)\n return final_result\n except Timeout:\n continue\n except RequestException as e:\n raise ValueError(\n f'Remote call failed with error code: {e.response.status_code},\\\n error message: {e.response.content.decode(\"utf-8\")}')\n\n raise ValueError(\n 'Remote call max retry times exceeded! Please try to use local call.'\n )\n elif self.method == 'GET':\n retry_times = MAX_RETRY_TIMES\n\n new_url = self.url\n matches = re.findall(r'\\{(.*?)\\}', self.url)\n for match in matches:\n if match in kwargs:\n new_url = new_url.replace('{' + match + '}', kwargs[match])\n else:\n print(\n f'The parameter {match} was not generated by the model.'\n )\n\n while retry_times:\n retry_times -= 1\n try:\n print('GET:', new_url)\n print('GET:', self.url)\n\n response = requests.request(\n 'GET',\n url=new_url,\n headers=self.header,\n params=remote_parsed_input)\n if response.status_code != requests.codes.ok:\n response.raise_for_status()\n\n origin_result = json.loads(\n response.content.decode('utf-8'))\n\n final_result = self._parse_output(\n origin_result, remote=True)\n return final_result\n except Timeout:\n continue\n except RequestException as e:\n raise ValueError(\n f'Remote call failed with error code: {e.response.status_code},\\\n error message: {e.response.content.decode(\"utf-8\")}')\n\n raise ValueError(\n 'Remote call max retry times exceeded! Please try to use local call.'\n )\n else:\n raise ValueError(\n 'Remote call method is invalid!We have POST and GET method.')\n\n def _remote_parse_input(self, *args, **kwargs):\n restored_dict = {}\n for key, value in kwargs.items():\n if '.' 
in key:\n # Split keys by \".\" and create nested dictionary structures\n keys = key.split('.')\n temp_dict = restored_dict\n for k in keys[:-1]:\n temp_dict = temp_dict.setdefault(k, {})\n temp_dict[keys[-1]] = value\n else:\n # f the key does not contain \".\", directly store the key-value pair into restored_dict\n restored_dict[key] = value\n kwargs = restored_dict\n print('传给tool的参数:', kwargs)\n return kwargs" } ]
import copy
import os

import gradio as gr
from config_utils import parse_configuration
from custom_prompt import (DEFAULT_EXEC_TEMPLATE, DEFAULT_SYSTEM_TEMPLATE,
                           DEFAULT_USER_TEMPLATE, CustomPromptGenerator,
                           parse_role_config)
from langchain.embeddings import ModelScopeEmbeddings
from langchain.vectorstores import FAISS
from modelscope_agent.agent import AgentExecutor
from modelscope_agent.agent_types import AgentType
from modelscope_agent.llm import LLMFactory
from modelscope_agent.retrieve import KnowledgeRetrieval
from modelscope_agent.tools.openapi_plugin import OpenAPIPluginTool
8,583
# init user chatbot_agent
def init_user_chatbot_agent(uuid_str=''):
    builder_cfg, model_cfg, tool_cfg, available_tool_list, plugin_cfg, available_plugin_list = parse_configuration(
        uuid_str)

    # set top_p and stop_words for role play
    model_cfg[builder_cfg.model]['generate_cfg']['top_p'] = 0.5
    model_cfg[builder_cfg.model]['generate_cfg']['stop'] = 'Observation'

    # build model
    print(f'using model {builder_cfg.model}')
    print(f'model config {model_cfg[builder_cfg.model]}')

    # # check configuration
    # if builder_cfg.model in ['qwen-max', 'qwen-72b-api', 'qwen-14b-api', 'qwen-plus']:
    #     if 'DASHSCOPE_API_KEY' not in os.environ:
    #         raise gr.Error('DASHSCOPE_API_KEY should be set via setting environment variable')
    try:
        llm = LLMFactory.build_llm(builder_cfg.model, model_cfg)
    except Exception as e:
        raise gr.Error(str(e))

    # build prompt with zero shot react template
    instruction_template = parse_role_config(builder_cfg)
    prompt_generator = CustomPromptGenerator(
        system_template=DEFAULT_SYSTEM_TEMPLATE,
        user_template=DEFAULT_USER_TEMPLATE,
# init user chatbot_agent
def init_user_chatbot_agent(uuid_str=''):
    builder_cfg, model_cfg, tool_cfg, available_tool_list, plugin_cfg, available_plugin_list = parse_configuration(
        uuid_str)

    # set top_p and stop_words for role play
    model_cfg[builder_cfg.model]['generate_cfg']['top_p'] = 0.5
    model_cfg[builder_cfg.model]['generate_cfg']['stop'] = 'Observation'

    # build model
    print(f'using model {builder_cfg.model}')
    print(f'model config {model_cfg[builder_cfg.model]}')

    # # check configuration
    # if builder_cfg.model in ['qwen-max', 'qwen-72b-api', 'qwen-14b-api', 'qwen-plus']:
    #     if 'DASHSCOPE_API_KEY' not in os.environ:
    #         raise gr.Error('DASHSCOPE_API_KEY should be set via setting environment variable')
    try:
        llm = LLMFactory.build_llm(builder_cfg.model, model_cfg)
    except Exception as e:
        raise gr.Error(str(e))

    # build prompt with zero shot react template
    instruction_template = parse_role_config(builder_cfg)
    prompt_generator = CustomPromptGenerator(
        system_template=DEFAULT_SYSTEM_TEMPLATE,
        user_template=DEFAULT_USER_TEMPLATE,
exec_template=DEFAULT_EXEC_TEMPLATE,
1
2023-12-12 04:24:00+00:00
12k